mirror of https://github.com/postgres/postgres.git

pgindent run.
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.84 2003/07/21 20:29:37 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.85 2003/08/04 00:43:11 momjian Exp $
  *
  * NOTES
  * The old interface functions have been converted to macros
@@ -617,7 +617,7 @@ heap_formtuple(TupleDesc tupleDescriptor,
  td->t_natts = numberOfAttributes;
  td->t_hoff = hoff;

- if (tupleDescriptor->tdhasoid) /* else leave infomask = 0 */
+ if (tupleDescriptor->tdhasoid) /* else leave infomask = 0 */
  td->t_infomask = HEAP_HASOID;

  DataFill((char *) td + hoff,
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.65 2003/07/21 20:29:37 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.66 2003/08/04 00:43:11 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -162,9 +162,9 @@ index_formtuple(TupleDesc tupleDescriptor,
  if ((size & INDEX_SIZE_MASK) != size)
  ereport(ERROR,
  (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("index tuple requires %lu bytes, maximum size is %lu",
- (unsigned long) size,
- (unsigned long) INDEX_SIZE_MASK)));
+ errmsg("index tuple requires %lu bytes, maximum size is %lu",
+ (unsigned long) size,
+ (unsigned long) INDEX_SIZE_MASK)));

  infomask |= size;
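Note: ereport() takes an error level plus a parenthesized list of auxiliary
calls such as errcode() and errmsg(); pgindent re-aligns the continuation
lines of such calls, which is all that changed above. A condensed sketch of
the call in index_formtuple with plausible indentation restored (backend-only
code; it compiles only inside the PostgreSQL source tree):

    if ((size & INDEX_SIZE_MASK) != size)
        ereport(ERROR,
                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                 errmsg("index tuple requires %lu bytes, maximum size is %lu",
                        (unsigned long) size,
                        (unsigned long) INDEX_SIZE_MASK)));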
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -9,7 +9,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.75 2003/07/21 20:29:38 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.76 2003/08/04 00:43:12 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -24,13 +24,13 @@


  static void printtup_startup(DestReceiver *self, int operation,
- TupleDesc typeinfo);
+ TupleDesc typeinfo);
  static void printtup(HeapTuple tuple, TupleDesc typeinfo,
- DestReceiver *self);
+ DestReceiver *self);
  static void printtup_20(HeapTuple tuple, TupleDesc typeinfo,
- DestReceiver *self);
+ DestReceiver *self);
  static void printtup_internal_20(HeapTuple tuple, TupleDesc typeinfo,
- DestReceiver *self);
+ DestReceiver *self);
  static void printtup_shutdown(DestReceiver *self);
  static void printtup_destroy(DestReceiver *self);

@@ -81,8 +81,8 @@ printtup_create_DR(CommandDest dest, Portal portal)
  else
  {
  /*
- * In protocol 2.0 the Bind message does not exist, so there is
- * no way for the columns to have different print formats; it's
+ * In protocol 2.0 the Bind message does not exist, so there is no
+ * way for the columns to have different print formats; it's
  * sufficient to look at the first one.
  */
  if (portal->formats && portal->formats[0] != 0)
@@ -111,12 +111,13 @@ static void
  printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
  {
  DR_printtup *myState = (DR_printtup *) self;
- Portal portal = myState->portal;
+ Portal portal = myState->portal;

  if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
  {
  /*
- * Send portal name to frontend (obsolete cruft, gone in proto 3.0)
+ * Send portal name to frontend (obsolete cruft, gone in proto
+ * 3.0)
  *
  * If portal name not specified, use "blank" portal.
  */
@@ -129,8 +130,8 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
  }

  /*
- * If this is a retrieve, and we are supposed to emit row descriptions,
- * then we send back the tuple descriptor of the tuples.
+ * If this is a retrieve, and we are supposed to emit row
+ * descriptions, then we send back the tuple descriptor of the tuples.
  */
  if (operation == CMD_SELECT && myState->sendDescrip)
  {
@@ -163,7 +164,7 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
  * or some similar function; it does not contain a full set of fields.
  * The targetlist will be NIL when executing a utility function that does
  * not have a plan. If the targetlist isn't NIL then it is a Query node's
- * targetlist; it is up to us to ignore resjunk columns in it. The formats[]
+ * targetlist; it is up to us to ignore resjunk columns in it. The formats[]
  * array pointer might be NULL (if we are doing Describe on a prepared stmt);
  * send zeroes for the format codes in that case.
  */
@@ -176,14 +177,14 @@ SendRowDescriptionMessage(TupleDesc typeinfo, List *targetlist, int16 *formats)
  int i;
  StringInfoData buf;

- pq_beginmessage(&buf, 'T'); /* tuple descriptor message type */
- pq_sendint(&buf, natts, 2); /* # of attrs in tuples */
+ pq_beginmessage(&buf, 'T'); /* tuple descriptor message type */
+ pq_sendint(&buf, natts, 2); /* # of attrs in tuples */

  for (i = 0; i < natts; ++i)
  {
- Oid atttypid = attrs[i]->atttypid;
- int32 atttypmod = attrs[i]->atttypmod;
- Oid basetype;
+ Oid atttypid = attrs[i]->atttypid;
+ int32 atttypmod = attrs[i]->atttypmod;
+ Oid basetype;

  pq_sendstring(&buf, NameStr(attrs[i]->attname));
  /* column ID info appears in protocol 3.0 and up */
@@ -320,8 +321,8 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
  }

  /*
- * If we have a toasted datum, forcibly detoast it here to
- * avoid memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid
+ * memory leakage inside the type's output routine.
  */
  if (thisState->typisvarlena)
  attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -347,7 +348,7 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)

  outputbytes = DatumGetByteaP(FunctionCall2(&thisState->finfo,
  attr,
- ObjectIdGetDatum(thisState->typelem)));
+ ObjectIdGetDatum(thisState->typelem)));
  /* We assume the result will not have been toasted */
  pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4);
  pq_sendbytes(&buf, VARDATA(outputbytes),
@@ -424,8 +425,8 @@ printtup_20(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
  Assert(thisState->format == 0);

  /*
- * If we have a toasted datum, forcibly detoast it here to
- * avoid memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid
+ * memory leakage inside the type's output routine.
  */
  if (thisState->typisvarlena)
  attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -536,9 +537,10 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
  continue;
  getTypeOutputInfo(typeinfo->attrs[i]->atttypid,
  &typoutput, &typelem, &typisvarlena);
+
  /*
- * If we have a toasted datum, forcibly detoast it here to
- * avoid memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid
+ * memory leakage inside the type's output routine.
  */
  if (typisvarlena)
  attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -547,7 +549,7 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)

  value = DatumGetCString(OidFunctionCall3(typoutput,
  attr,
- ObjectIdGetDatum(typelem),
+ ObjectIdGetDatum(typelem),
  Int32GetDatum(typeinfo->attrs[i]->atttypmod)));

  printatt((unsigned) i + 1, typeinfo->attrs[i], value);
@@ -627,8 +629,8 @@ printtup_internal_20(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
  Assert(thisState->format == 1);

  /*
- * If we have a toasted datum, forcibly detoast it here to
- * avoid memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid
+ * memory leakage inside the type's output routine.
  */
  if (thisState->typisvarlena)
  attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -637,7 +639,7 @@ printtup_internal_20(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)

  outputbytes = DatumGetByteaP(FunctionCall2(&thisState->finfo,
  attr,
- ObjectIdGetDatum(thisState->typelem)));
+ ObjectIdGetDatum(thisState->typelem)));
  /* We assume the result will not have been toasted */
  pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4);
  pq_sendbytes(&buf, VARDATA(outputbytes),
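Note: the detoast comment reflowed several times above describes a pattern
all of these output paths share: a varlena datum fetched from a tuple may be
compressed or stored out of line ("toasted"), so it is expanded once, up
front, rather than inside the type's output function, where the expanded copy
would leak. A condensed sketch of that pattern (backend-only code; variable
names follow printtup):

    origattr = heap_getattr(tuple, i + 1, typeinfo, &isnull);

    /*
     * Detoast before calling the output routine, so the detoasted
     * copy is not allocated (and lost) inside it.
     */
    if (thisState->typisvarlena)
        attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
    else
        attr = origattr;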
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/gist/gistscan.c,v 1.45 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/gist/gistscan.c,v 1.46 2003/08/04 00:43:12 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -104,11 +104,12 @@ gistrescan(PG_FUNCTION_ARGS)
  memmove(s->keyData,
  key,
  s->numberOfKeys * sizeof(ScanKeyData));
+
  /*
  * Play games here with the scan key to use the Consistent
- * function for all comparisons: 1) the sk_procedure field
- * will now be used to hold the strategy number 2) the
- * sk_func field will point to the Consistent function
+ * function for all comparisons: 1) the sk_procedure field will
+ * now be used to hold the strategy number 2) the sk_func field
+ * will point to the Consistent function
  */
  for (i = 0; i < s->numberOfKeys; i++)
  {
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.36 2003/06/22 22:04:54 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.37 2003/08/04 00:43:12 momjian Exp $
  *
  * NOTES
  * These functions are stored in pg_amproc. For each operator class
@@ -60,9 +60,9 @@ hashfloat4(PG_FUNCTION_ARGS)
  float4 key = PG_GETARG_FLOAT4(0);

  /*
- * On IEEE-float machines, minus zero and zero have different bit patterns
- * but should compare as equal. We must ensure that they have the same
- * hash value, which is most easily done this way:
+ * On IEEE-float machines, minus zero and zero have different bit
+ * patterns but should compare as equal. We must ensure that they
+ * have the same hash value, which is most easily done this way:
  */
  if (key == (float4) 0)
  PG_RETURN_UINT32(0);
@@ -76,9 +76,9 @@ hashfloat8(PG_FUNCTION_ARGS)
  float8 key = PG_GETARG_FLOAT8(0);

  /*
- * On IEEE-float machines, minus zero and zero have different bit patterns
- * but should compare as equal. We must ensure that they have the same
- * hash value, which is most easily done this way:
+ * On IEEE-float machines, minus zero and zero have different bit
+ * patterns but should compare as equal. We must ensure that they
+ * have the same hash value, which is most easily done this way:
  */
  if (key == (float8) 0)
  PG_RETURN_UINT32(0);
@@ -121,9 +121,9 @@ hashtext(PG_FUNCTION_ARGS)
  Datum result;

  /*
- * Note: this is currently identical in behavior to hashvarlena,
- * but it seems likely that we may need to do something different
- * in non-C locales. (See also hashbpchar, if so.)
+ * Note: this is currently identical in behavior to hashvarlena, but
+ * it seems likely that we may need to do something different in non-C
+ * locales. (See also hashbpchar, if so.)
  */
  result = hash_any((unsigned char *) VARDATA(key),
  VARSIZE(key) - VARHDRSZ);
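Note: the comment being rewrapped in hashfloat4/hashfloat8 is about a real
IEEE-754 quirk: +0 and -0 compare equal but have different bit patterns, so
hashing the raw bytes would give equal keys different hash values. A minimal
standalone C demonstration of why the special case is needed (not PostgreSQL
code, just an illustration):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int
    main(void)
    {
        float       pz = 0.0f;
        float       nz = -0.0f;
        uint32_t    pbits, nbits;

        /* equal as values... */
        printf("pz == nz: %d\n", pz == nz);             /* prints 1 */

        /* ...but not as bit patterns, which is what a byte hash sees */
        memcpy(&pbits, &pz, sizeof(pbits));
        memcpy(&nbits, &nz, sizeof(nbits));
        printf("bits: %08x vs %08x\n", pbits, nbits);   /* 00000000 vs 80000000 */
        return 0;
    }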
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashovfl.c,v 1.35 2003/07/21 20:29:38 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashovfl.c,v 1.36 2003/08/04 00:43:12 momjian Exp $
  *
  * NOTES
  * Overflow pages look like ordinary relation pages.
@@ -205,8 +205,8 @@ _hash_getovfladdr(Relation rel, Buffer *metabufp)
  if (++splitnum >= NCACHED)
  ereport(ERROR,
  (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("out of overflow pages in hash index \"%s\"",
- RelationGetRelationName(rel))));
+ errmsg("out of overflow pages in hash index \"%s\"",
+ RelationGetRelationName(rel))));
  metap->hashm_ovflpoint = splitnum;
  metap->hashm_spares[splitnum] = metap->hashm_spares[splitnum - 1];
  metap->hashm_spares[splitnum - 1]--;
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.152 2003/07/21 20:29:38 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.153 2003/08/04 00:43:14 momjian Exp $
  *
  *
  * INTERFACE ROUTINES
@@ -1132,6 +1132,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid)
  xlhdr.t_natts = tup->t_data->t_natts;
  xlhdr.t_infomask = tup->t_data->t_infomask;
  xlhdr.t_hoff = tup->t_data->t_hoff;
+
  /*
  * note we mark rdata[1] as belonging to buffer; if XLogInsert
  * decides to write the whole page to the xlog, we don't need to
@@ -1149,9 +1150,9 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid)
  rdata[2].next = NULL;

  /*
- * If this is the single and first tuple on page, we can reinit the
- * page instead of restoring the whole thing. Set flag, and hide
- * buffer references from XLogInsert.
+ * If this is the single and first tuple on page, we can reinit
+ * the page instead of restoring the whole thing. Set flag, and
+ * hide buffer references from XLogInsert.
  */
  if (ItemPointerGetOffsetNumber(&(tup->t_self)) == FirstOffsetNumber &&
  PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
@@ -1912,7 +1913,7 @@ log_heap_clean(Relation reln, Buffer buffer, OffsetNumber *unused, int uncnt)

  /*
  * The unused-offsets array is not actually in the buffer, but pretend
- * that it is. When XLogInsert stores the whole buffer, the offsets
+ * that it is. When XLogInsert stores the whole buffer, the offsets
  * array need not be stored too.
  */
  rdata[1].buffer = buffer;
@@ -1991,9 +1992,10 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
  2 * sizeof(TransactionId));
  hsize += 2 * sizeof(TransactionId);
  }
+
  /*
- * As with insert records, we need not store the rdata[2] segment
- * if we decide to store the whole buffer instead.
+ * As with insert records, we need not store the rdata[2] segment if
+ * we decide to store the whole buffer instead.
  */
  rdata[2].buffer = newbuf;
  rdata[2].data = (char *) &xlhdr;
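Note: the rdata[] entries touched in these heapam.c hunks are links in the
chain of XLogRecData segments passed to XLogInsert. Tagging a segment with a
buffer is what lets XLogInsert drop that segment whenever it logs a full-page
image of the buffer instead, which is what the reflowed comments describe. A
condensed sketch of such a chain (the xlrec/xlhdr variables and the lengths
here are placeholders, not the exact heap_insert code; field usage follows
the 7.4-era XLogRecData layout as best recalled):

    XLogRecData rdata[2];

    rdata[0].data = (char *) &xlrec;    /* fixed record header: always logged */
    rdata[0].len = sizeof(xlrec);
    rdata[0].buffer = InvalidBuffer;    /* not associated with any page */
    rdata[0].next = &(rdata[1]);

    rdata[1].data = (char *) &xlhdr;    /* data that also lives in the page */
    rdata[1].len = sizeof(xlhdr);
    rdata[1].buffer = buffer;           /* may be dropped if a full-page
                                         * image of this buffer is written */
    rdata[1].next = NULL;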
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.67 2003/07/21 20:29:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.68 2003/08/04 00:43:15 momjian Exp $
  *
  * INTERFACE ROUTINES
  * index_open - open an index relation by relation OID
@@ -300,7 +300,7 @@ index_beginscan(Relation heapRelation,
  * index_rescan - (re)start a scan of an index
  *
  * The caller may specify a new set of scankeys (but the number of keys
- * cannot change). To restart the scan without changing keys, pass NULL
+ * cannot change). To restart the scan without changing keys, pass NULL
  * for the key array.
  *
  * Note that this is also called when first starting an indexscan;
@@ -394,8 +394,8 @@ index_restrpos(IndexScanDesc scan)

  /*
  * We do not reset got_tuple; so if the scan is actually being
- * short-circuited by index_getnext, the effective position restoration
- * is done by restoring unique_tuple_pos.
+ * short-circuited by index_getnext, the effective position
+ * restoration is done by restoring unique_tuple_pos.
  */
  scan->unique_tuple_pos = scan->unique_tuple_mark;

@@ -427,24 +427,24 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
  }

  /*
- * If we already got a tuple and it must be unique, there's no need
- * to make the index AM look through any additional tuples. (This can
+ * If we already got a tuple and it must be unique, there's no need to
+ * make the index AM look through any additional tuples. (This can
  * save a useful amount of work in scenarios where there are many dead
  * tuples due to heavy update activity.)
  *
  * To do this we must keep track of the logical scan position
  * (before/on/after tuple). Also, we have to be sure to release scan
- * resources before returning NULL; if we fail to do so then a multi-index
- * scan can easily run the system out of free buffers. We can release
- * index-level resources fairly cheaply by calling index_rescan. This
- * means there are two persistent states as far as the index AM is
- * concerned: on-tuple and rescanned. If we are actually asked to
- * re-fetch the single tuple, we have to go through a fresh indexscan
- * startup, which penalizes that (infrequent) case.
+ * resources before returning NULL; if we fail to do so then a
+ * multi-index scan can easily run the system out of free buffers. We
+ * can release index-level resources fairly cheaply by calling
+ * index_rescan. This means there are two persistent states as far as
+ * the index AM is concerned: on-tuple and rescanned. If we are
+ * actually asked to re-fetch the single tuple, we have to go through
+ * a fresh indexscan startup, which penalizes that (infrequent) case.
  */
  if (scan->keys_are_unique && scan->got_tuple)
  {
- int new_tuple_pos = scan->unique_tuple_pos;
+ int new_tuple_pos = scan->unique_tuple_pos;

  if (ScanDirectionIsForward(direction))
  {
@@ -459,22 +459,23 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
  if (new_tuple_pos == 0)
  {
  /*
- * We are moving onto the unique tuple from having been off it.
- * We just fall through and let the index AM do the work. Note
- * we should get the right answer regardless of scan direction.
+ * We are moving onto the unique tuple from having been off
+ * it. We just fall through and let the index AM do the work.
+ * Note we should get the right answer regardless of scan
+ * direction.
  */
- scan->unique_tuple_pos = 0; /* need to update position */
+ scan->unique_tuple_pos = 0; /* need to update position */
  }
  else
  {
  /*
- * Moving off the tuple; must do amrescan to release index-level
- * pins before we return NULL. Since index_rescan will reset
- * my state, must save and restore...
+ * Moving off the tuple; must do amrescan to release
+ * index-level pins before we return NULL. Since index_rescan
+ * will reset my state, must save and restore...
  */
- int unique_tuple_mark = scan->unique_tuple_mark;
+ int unique_tuple_mark = scan->unique_tuple_mark;

- index_rescan(scan, NULL /* no change to key */);
+ index_rescan(scan, NULL /* no change to key */ );

  scan->keys_are_unique = true;
  scan->got_tuple = true;
@@ -631,7 +632,7 @@ index_bulk_delete(Relation indexRelation,
  */
 IndexBulkDeleteResult *
 index_vacuum_cleanup(Relation indexRelation,
- IndexVacuumCleanupInfo *info,
+ IndexVacuumCleanupInfo * info,
  IndexBulkDeleteResult *stats)
 {
  RegProcedure procedure;
@@ -649,7 +650,7 @@ index_vacuum_cleanup(Relation indexRelation,
  DatumGetPointer(OidFunctionCall3(procedure,
  PointerGetDatum(indexRelation),
  PointerGetDatum((Pointer) info),
- PointerGetDatum((Pointer) stats)));
+ PointerGetDatum((Pointer) stats)));

  return result;
 }
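Note: the long comment rewrapped in index_getnext describes a tiny state
machine: for a scan known to return at most one tuple, the scan remembers
whether it is logically before, on, or after that tuple, and steps that
position instead of re-running the index AM. A standalone illustration of
just the position bookkeeping (not PostgreSQL code; the -1/0/+1 encoding
mirrors the before/on/after idea in the comment):

    #include <stdio.h>

    /* logical position relative to the single matching tuple */
    enum { POS_BEFORE = -1, POS_ON = 0, POS_AFTER = 1 };

    /* step the position one place, clamping at the ends */
    static int
    step(int pos, int forward)
    {
        if (forward)
            return (pos < POS_AFTER) ? pos + 1 : POS_AFTER;
        return (pos > POS_BEFORE) ? pos - 1 : POS_BEFORE;
    }

    int
    main(void)
    {
        int pos = POS_ON;           /* we already returned the tuple */

        pos = step(pos, 1);         /* forward: now past it -> return NULL */
        printf("pos = %d\n", pos);  /* prints 1 */
        pos = step(pos, 0);         /* backward: back on the tuple */
        printf("pos = %d\n", pos);  /* prints 0 */
        return 0;
    }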
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.102 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.103 2003/08/04 00:43:15 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -432,9 +432,9 @@ _bt_insertonpg(Relation rel,
  *
  * must write-lock that page before releasing write lock on
  * current page; else someone else's _bt_check_unique scan
- * could fail to see our insertion. write locks on intermediate
- * dead pages won't do because we don't know when they will get
- * de-linked from the tree.
+ * could fail to see our insertion. write locks on
+ * intermediate dead pages won't do because we don't know when
+ * they will get de-linked from the tree.
  */
  Buffer rbuf = InvalidBuffer;

@@ -523,9 +523,10 @@ _bt_insertonpg(Relation rel,
  /*
  * If we are doing this insert because we split a page that was
  * the only one on its tree level, but was not the root, it may
- * have been the "fast root". We need to ensure that the fast root
- * link points at or above the current page. We can safely acquire
- * a lock on the metapage here --- see comments for _bt_newroot().
+ * have been the "fast root". We need to ensure that the fast
+ * root link points at or above the current page. We can safely
+ * acquire a lock on the metapage here --- see comments for
+ * _bt_newroot().
  */
  if (split_only_page)
  {
@@ -1135,7 +1136,7 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
  *
  * On entry, buf and rbuf are the left and right split pages, which we
  * still hold write locks on per the L&Y algorithm. We release the
- * write locks once we have write lock on the parent page. (Any sooner,
+ * write locks once we have write lock on the parent page. (Any sooner,
  * and it'd be possible for some other process to try to split or delete
  * one of these pages, and get confused because it cannot find the downlink.)
  *
@@ -1155,19 +1156,19 @@ _bt_insert_parent(Relation rel,
  bool is_only)
 {
  /*
- * Here we have to do something Lehman and Yao don't talk about:
- * deal with a root split and construction of a new root. If our
- * stack is empty then we have just split a node on what had been
- * the root level when we descended the tree. If it was still the
- * root then we perform a new-root construction. If it *wasn't*
- * the root anymore, search to find the next higher level that
- * someone constructed meanwhile, and find the right place to insert
- * as for the normal case.
+ * Here we have to do something Lehman and Yao don't talk about: deal
+ * with a root split and construction of a new root. If our stack is
+ * empty then we have just split a node on what had been the root
+ * level when we descended the tree. If it was still the root then we
+ * perform a new-root construction. If it *wasn't* the root anymore,
+ * search to find the next higher level that someone constructed
+ * meanwhile, and find the right place to insert as for the normal
+ * case.
  *
- * If we have to search for the parent level, we do so by
- * re-descending from the root. This is not super-efficient,
- * but it's rare enough not to matter. (This path is also taken
- * when called from WAL recovery --- we have no stack in that case.)
+ * If we have to search for the parent level, we do so by re-descending
+ * from the root. This is not super-efficient, but it's rare enough
+ * not to matter. (This path is also taken when called from WAL
+ * recovery --- we have no stack in that case.)
  */
  if (is_root)
  {
@@ -1222,9 +1223,9 @@ _bt_insert_parent(Relation rel,
  /*
  * Find the parent buffer and get the parent page.
  *
- * Oops - if we were moved right then we need to change stack
- * item! We want to find parent pointing to where we are,
- * right ? - vadim 05/27/97
+ * Oops - if we were moved right then we need to change stack item!
+ * We want to find parent pointing to where we are, right ? -
+ * vadim 05/27/97
  */
  ItemPointerSet(&(stack->bts_btitem.bti_itup.t_tid),
  bknum, P_HIKEY);
@@ -1296,16 +1297,16 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)

  /*
  * start = InvalidOffsetNumber means "search the whole page".
- * We need this test anyway due to possibility that
- * page has a high key now when it didn't before.
+ * We need this test anyway due to possibility that page has a
+ * high key now when it didn't before.
  */
  if (start < minoff)
  start = minoff;

  /*
  * These loops will check every item on the page --- but in an
- * order that's attuned to the probability of where it actually
- * is. Scan to the right first, then to the left.
+ * order that's attuned to the probability of where it
+ * actually is. Scan to the right first, then to the left.
  */
  for (offnum = start;
  offnum <= maxoff;
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.66 2003/07/21 20:29:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.67 2003/08/04 00:43:15 momjian Exp $
  *
  * NOTES
  * Postgres btree pages look like ordinary relation pages. The opaque
@@ -181,8 +181,8 @@ _bt_getroot(Relation rel, int access)
  /*
  * Metadata initialized by someone else. In order to
  * guarantee no deadlocks, we have to release the metadata
- * page and start all over again. (Is that really true?
- * But it's hardly worth trying to optimize this case.)
+ * page and start all over again. (Is that really true? But
+ * it's hardly worth trying to optimize this case.)
  */
  _bt_relbuf(rel, metabuf);
  return _bt_getroot(rel, access);
@@ -190,8 +190,8 @@ _bt_getroot(Relation rel, int access)

  /*
  * Get, initialize, write, and leave a lock of the appropriate
- * type on the new root page. Since this is the first page in
- * the tree, it's a leaf as well as the root.
+ * type on the new root page. Since this is the first page in the
+ * tree, it's a leaf as well as the root.
  */
  rootbuf = _bt_getbuf(rel, P_NEW, BT_WRITE);
  rootblkno = BufferGetBlockNumber(rootbuf);
@@ -240,7 +240,7 @@ _bt_getroot(Relation rel, int access)
  _bt_wrtnorelbuf(rel, rootbuf);

  /*
- * swap root write lock for read lock. There is no danger of
+ * swap root write lock for read lock. There is no danger of
  * anyone else accessing the new root page while it's unlocked,
  * since no one else knows where it is yet.
  */
@@ -284,8 +284,8 @@ _bt_getroot(Relation rel, int access)
  }

  /*
- * By here, we have a pin and read lock on the root page, and no
- * lock set on the metadata page. Return the root page's buffer.
+ * By here, we have a pin and read lock on the root page, and no lock
+ * set on the metadata page. Return the root page's buffer.
  */
  return rootbuf;
 }
@@ -299,7 +299,7 @@ _bt_getroot(Relation rel, int access)
  * By the time we acquire lock on the root page, it might have been split and
  * not be the true root anymore. This is okay for the present uses of this
  * routine; we only really need to be able to move up at least one tree level
- * from whatever non-root page we were at. If we ever do need to lock the
+ * from whatever non-root page we were at. If we ever do need to lock the
  * one true root page, we could loop here, re-reading the metapage on each
  * failure. (Note that it wouldn't do to hold the lock on the metapage while
  * moving to the root --- that'd deadlock against any concurrent root split.)
@@ -406,9 +406,9 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
  * First see if the FSM knows of any free pages.
  *
  * We can't trust the FSM's report unreservedly; we have to check
- * that the page is still free. (For example, an already-free page
- * could have been re-used between the time the last VACUUM scanned
- * it and the time the VACUUM made its FSM updates.)
+ * that the page is still free. (For example, an already-free
+ * page could have been re-used between the time the last VACUUM
+ * scanned it and the time the VACUUM made its FSM updates.)
  */
  for (;;)
  {
@@ -431,10 +431,10 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
  /*
  * Extend the relation by one page.
  *
- * We have to use a lock to ensure no one else is extending the rel at
- * the same time, else we will both try to initialize the same new
- * page. We can skip locking for new or temp relations, however,
- * since no one else could be accessing them.
+ * We have to use a lock to ensure no one else is extending the rel
+ * at the same time, else we will both try to initialize the same
+ * new page. We can skip locking for new or temp relations,
+ * however, since no one else could be accessing them.
  */
  needLock = !(rel->rd_isnew || rel->rd_istemp);

@@ -444,8 +444,8 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
  buf = ReadBuffer(rel, P_NEW);

  /*
- * Release the file-extension lock; it's now OK for someone else to
- * extend the relation some more.
+ * Release the file-extension lock; it's now OK for someone else
+ * to extend the relation some more.
  */
  if (needLock)
  UnlockPage(rel, 0, ExclusiveLock);
@@ -484,7 +484,7 @@ _bt_relbuf(Relation rel, Buffer buf)
  * and a pin on the buffer.
  *
  * NOTE: actually, the buffer manager just marks the shared buffer page
- * dirty here; the real I/O happens later. This is okay since we are not
+ * dirty here; the real I/O happens later. This is okay since we are not
  * relying on write ordering anyway. The WAL mechanism is responsible for
  * guaranteeing correctness after a crash.
  */
@@ -534,13 +534,14 @@ _bt_page_recyclable(Page page)
  BTPageOpaque opaque;
+
  /*
- * It's possible to find an all-zeroes page in an index --- for example,
- * a backend might successfully extend the relation one page and then
- * crash before it is able to make a WAL entry for adding the page.
- * If we find a zeroed page then reclaim it.
+ * It's possible to find an all-zeroes page in an index --- for
+ * example, a backend might successfully extend the relation one page
+ * and then crash before it is able to make a WAL entry for adding the
+ * page. If we find a zeroed page then reclaim it.
  */
  if (PageIsNew(page))
  return true;

  /*
  * Otherwise, recycle if deleted and too old to have any processes
  * interested in it.
@@ -565,7 +566,7 @@ _bt_page_recyclable(Page page)
  * mistake. On exit, metapage data is correct and we no longer have
  * a pin or lock on the metapage.
  *
- * Actually this is not used for splitting on-the-fly anymore. It's only used
+ * Actually this is not used for splitting on-the-fly anymore. It's only used
  * in nbtsort.c at the completion of btree building, where we know we have
  * sole access to the index anyway.
  */
@@ -623,7 +624,7 @@ _bt_metaproot(Relation rel, BlockNumber rootbknum, uint32 level)
 /*
  * Delete item(s) from a btree page.
  *
- * This must only be used for deleting leaf items. Deleting an item on a
+ * This must only be used for deleting leaf items. Deleting an item on a
  * non-leaf page has to be done as part of an atomic action that includes
  * deleting the page it points to.
  *
@@ -646,9 +647,7 @@ _bt_delitems(Relation rel, Buffer buf,
  * adjusting item numbers for previous deletions.
  */
  for (i = nitems - 1; i >= 0; i--)
- {
  PageIndexTupleDelete(page, itemnos[i]);
- }

  /* XLOG stuff */
  if (!rel->rd_istemp)
@@ -666,8 +665,8 @@ _bt_delitems(Relation rel, Buffer buf,
  rdata[0].next = &(rdata[1]);

  /*
- * The target-offsets array is not in the buffer, but pretend
- * that it is. When XLogInsert stores the whole buffer, the offsets
+ * The target-offsets array is not in the buffer, but pretend that
+ * it is. When XLogInsert stores the whole buffer, the offsets
  * array need not be stored too.
  */
  rdata[1].buffer = buf;
@@ -701,7 +700,7 @@ _bt_delitems(Relation rel, Buffer buf,
  * may currently be trying to follow links leading to the page; they have to
  * be allowed to use its right-link to recover. See nbtree/README.
  *
- * On entry, the target buffer must be pinned and read-locked. This lock and
+ * On entry, the target buffer must be pinned and read-locked. This lock and
  * pin will be dropped before exiting.
  *
  * Returns the number of pages successfully deleted (zero on failure; could
@@ -714,7 +713,7 @@ _bt_delitems(Relation rel, Buffer buf,
 int
 _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
 {
- BlockNumber target,
+ BlockNumber target,
  leftsib,
  rightsib,
  parent;
@@ -740,17 +739,18 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
  BTPageOpaque opaque;

  /*
- * We can never delete rightmost pages nor root pages. While at it,
+ * We can never delete rightmost pages nor root pages. While at it,
  * check that page is not already deleted and is empty.
  */
  page = BufferGetPage(buf);
  opaque = (BTPageOpaque) PageGetSpecialPointer(page);
  if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque) ||
- P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
+ P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
  {
  _bt_relbuf(rel, buf);
  return 0;
  }
+
  /*
  * Save info about page, including a copy of its high key (it must
  * have one, being non-rightmost).
@@ -760,12 +760,13 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
  leftsib = opaque->btpo_prev;
  itemid = PageGetItemId(page, P_HIKEY);
  targetkey = CopyBTItem((BTItem) PageGetItem(page, itemid));
+
  /*
  * We need to get an approximate pointer to the page's parent page.
- * Use the standard search mechanism to search for the page's high key;
- * this will give us a link to either the current parent or someplace
- * to its left (if there are multiple equal high keys). To avoid
- * deadlocks, we'd better drop the target page lock first.
+ * Use the standard search mechanism to search for the page's high
+ * key; this will give us a link to either the current parent or
+ * someplace to its left (if there are multiple equal high keys). To
+ * avoid deadlocks, we'd better drop the target page lock first.
  */
  _bt_relbuf(rel, buf);
  /* we need a scan key to do our search, so build one */
@@ -775,9 +776,11 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
  &lbuf, BT_READ);
  /* don't need a pin on that either */
  _bt_relbuf(rel, lbuf);
+
  /*
  * If we are trying to delete an interior page, _bt_search did more
- * than we needed. Locate the stack item pointing to our parent level.
+ * than we needed. Locate the stack item pointing to our parent
+ * level.
  */
  ilevel = 0;
  for (;;)
@@ -789,10 +792,12 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
  stack = stack->bts_parent;
  ilevel++;
  }
+
  /*
  * We have to lock the pages we need to modify in the standard order:
- * moving right, then up. Else we will deadlock against other writers.
- *
+ * moving right, then up. Else we will deadlock against other
+ * writers.
+ *
  * So, we need to find and write-lock the current left sibling of the
  * target page. The sibling that was current a moment ago could have
  * split, so we may have to move right. This search could fail if
@@ -823,21 +828,24 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
  }
  else
  lbuf = InvalidBuffer;
+
  /*
- * Next write-lock the target page itself. It should be okay to take just
- * a write lock not a superexclusive lock, since no scans would stop on an
- * empty page.
+ * Next write-lock the target page itself. It should be okay to take
+ * just a write lock not a superexclusive lock, since no scans would
+ * stop on an empty page.
  */
  buf = _bt_getbuf(rel, target, BT_WRITE);
  page = BufferGetPage(buf);
  opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+
  /*
- * Check page is still empty etc, else abandon deletion. The empty check
- * is necessary since someone else might have inserted into it while
- * we didn't have it locked; the others are just for paranoia's sake.
+ * Check page is still empty etc, else abandon deletion. The empty
+ * check is necessary since someone else might have inserted into it
+ * while we didn't have it locked; the others are just for paranoia's
+ * sake.
  */
  if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque) ||
- P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
+ P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
  {
  _bt_relbuf(rel, buf);
  if (BufferIsValid(lbuf))
@@ -846,14 +854,17 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
  }
  if (opaque->btpo_prev != leftsib)
  elog(ERROR, "left link changed unexpectedly");
+
  /*
  * And next write-lock the (current) right sibling.
  */
  rightsib = opaque->btpo_next;
  rbuf = _bt_getbuf(rel, rightsib, BT_WRITE);
+
  /*
  * Next find and write-lock the current parent of the target page.
- * This is essentially the same as the corresponding step of splitting.
+ * This is essentially the same as the corresponding step of
+ * splitting.
  */
  ItemPointerSet(&(stack->bts_btitem.bti_itup.t_tid),
  target, P_HIKEY);
@@ -863,10 +874,11 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
  RelationGetRelationName(rel));
  parent = stack->bts_blkno;
  poffset = stack->bts_offset;
+
  /*
  * If the target is the rightmost child of its parent, then we can't
- * delete, unless it's also the only child --- in which case the parent
- * changes to half-dead status.
+ * delete, unless it's also the only child --- in which case the
+ * parent changes to half-dead status.
  */
  page = BufferGetPage(pbuf);
  opaque = (BTPageOpaque) PageGetSpecialPointer(page);
@@ -893,12 +905,13 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
  if (OffsetNumberNext(P_FIRSTDATAKEY(opaque)) == maxoff)
  parent_one_child = true;
  }
+
  /*
  * If we are deleting the next-to-last page on the target's level,
- * then the rightsib is a candidate to become the new fast root.
- * (In theory, it might be possible to push the fast root even further
- * down, but the odds of doing so are slim, and the locking considerations
- * daunting.)
+ * then the rightsib is a candidate to become the new fast root. (In
+ * theory, it might be possible to push the fast root even further
+ * down, but the odds of doing so are slim, and the locking
+ * considerations daunting.)
  *
  * We can safely acquire a lock on the metapage here --- see comments for
  * _bt_newroot().
@@ -914,12 +927,13 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
  metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
  metapg = BufferGetPage(metabuf);
  metad = BTPageGetMeta(metapg);
+
  /*
  * The expected case here is btm_fastlevel == targetlevel+1;
- * if the fastlevel is <= targetlevel, something is wrong, and we
- * choose to overwrite it to fix it.
+ * if the fastlevel is <= targetlevel, something is wrong, and
+ * we choose to overwrite it to fix it.
  */
- if (metad->btm_fastlevel > targetlevel+1)
+ if (metad->btm_fastlevel > targetlevel + 1)
  {
  /* no update wanted */
  _bt_relbuf(rel, metabuf);
@@ -937,9 +951,9 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)

  /*
  * Update parent. The normal case is a tad tricky because we want to
- * delete the target's downlink and the *following* key. Easiest way is
- * to copy the right sibling's downlink over the target downlink, and then
- * delete the following item.
+ * delete the target's downlink and the *following* key. Easiest way
+ * is to copy the right sibling's downlink over the target downlink,
+ * and then delete the following item.
  */
  page = BufferGetPage(pbuf);
  opaque = (BTPageOpaque) PageGetSpecialPointer(page);
@@ -950,7 +964,7 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
  }
  else
  {
- OffsetNumber nextoffset;
+ OffsetNumber nextoffset;

  itemid = PageGetItemId(page, poffset);
  btitem = (BTItem) PageGetItem(page, itemid);
@@ -968,8 +982,8 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
  }

  /*
- * Update siblings' side-links. Note the target page's side-links will
- * continue to point to the siblings.
+ * Update siblings' side-links. Note the target page's side-links
+ * will continue to point to the siblings.
  */
  if (BufferIsValid(lbuf))
  {
@@ -1096,10 +1110,11 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
  _bt_wrtbuf(rel, lbuf);

  /*
- * If parent became half dead, recurse to try to delete it. Otherwise,
- * if right sibling is empty and is now the last child of the parent,
- * recurse to try to delete it. (These cases cannot apply at the same
- * time, though the second case might itself recurse to the first.)
+ * If parent became half dead, recurse to try to delete it.
+ * Otherwise, if right sibling is empty and is now the last child of
+ * the parent, recurse to try to delete it. (These cases cannot apply
+ * at the same time, though the second case might itself recurse to
+ * the first.)
  */
  if (parent_half_dead)
  {
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -12,7 +12,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.103 2003/07/21 20:29:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.104 2003/08/04 00:43:15 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -580,19 +580,20 @@ btbulkdelete(PG_FUNCTION_ARGS)

  /*
  * The outer loop iterates over index leaf pages, the inner over items
- * on a leaf page. We issue just one _bt_delitems() call per page,
- * so as to minimize WAL traffic.
+ * on a leaf page. We issue just one _bt_delitems() call per page, so
+ * as to minimize WAL traffic.
  *
- * Note that we exclusive-lock every leaf page containing data items,
- * in sequence left to right. It sounds attractive to only exclusive-lock
- * those containing items we need to delete, but unfortunately that
- * is not safe: we could then pass a stopped indexscan, which could
- * in rare cases lead to deleting the item it needs to find when it
- * resumes. (See _bt_restscan --- this could only happen if an indexscan
- * stops on a deletable item and then a page split moves that item
- * into a page further to its right, which the indexscan will have no
- * pin on.) We can skip obtaining exclusive lock on empty pages
- * though, since no indexscan could be stopped on those.
+ * Note that we exclusive-lock every leaf page containing data items, in
+ * sequence left to right. It sounds attractive to only
+ * exclusive-lock those containing items we need to delete, but
+ * unfortunately that is not safe: we could then pass a stopped
+ * indexscan, which could in rare cases lead to deleting the item it
+ * needs to find when it resumes. (See _bt_restscan --- this could
+ * only happen if an indexscan stops on a deletable item and then a
+ * page split moves that item into a page further to its right, which
+ * the indexscan will have no pin on.) We can skip obtaining
+ * exclusive lock on empty pages though, since no indexscan could be
+ * stopped on those.
  */
  buf = _bt_get_endpoint(rel, 0, false);
  if (BufferIsValid(buf)) /* check for empty index */
@@ -604,7 +605,7 @@ btbulkdelete(PG_FUNCTION_ARGS)
  OffsetNumber offnum,
  minoff,
  maxoff;
- BlockNumber nextpage;
+ BlockNumber nextpage;

  CHECK_FOR_INTERRUPTS();

@@ -622,12 +623,14 @@ btbulkdelete(PG_FUNCTION_ARGS)
  */
  LockBuffer(buf, BUFFER_LOCK_UNLOCK);
  LockBufferForCleanup(buf);
+
  /*
- * Recompute minoff/maxoff, both of which could have changed
- * while we weren't holding the lock.
+ * Recompute minoff/maxoff, both of which could have
+ * changed while we weren't holding the lock.
  */
  minoff = P_FIRSTDATAKEY(opaque);
  maxoff = PageGetMaxOffsetNumber(page);
+
  /*
  * Scan over all items to see which ones need deleted
  * according to the callback function.
@@ -640,7 +643,7 @@ btbulkdelete(PG_FUNCTION_ARGS)
  ItemPointer htup;

  btitem = (BTItem) PageGetItem(page,
- PageGetItemId(page, offnum));
+ PageGetItemId(page, offnum));
  htup = &(btitem->bti_itup.t_tid);
  if (callback(htup, callback_state))
  {
@@ -651,6 +654,7 @@ btbulkdelete(PG_FUNCTION_ARGS)
  num_index_tuples += 1;
  }
  }
+
  /*
  * If we need to delete anything, do it and write the buffer;
  * else just release the buffer.
@@ -662,9 +666,7 @@ btbulkdelete(PG_FUNCTION_ARGS)
  _bt_wrtbuf(rel, buf);
  }
  else
- {
  _bt_relbuf(rel, buf);
- }
  /* And advance to next page, if any */
  if (nextpage == P_NONE)
  break;
@@ -712,7 +714,7 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
  /* No point in remembering more than MaxFSMPages pages */
  maxFreePages = MaxFSMPages;
  if ((BlockNumber) maxFreePages > num_pages)
- maxFreePages = (int) num_pages + 1; /* +1 to avoid palloc(0) */
+ maxFreePages = (int) num_pages + 1; /* +1 to avoid palloc(0) */
  freePages = (BlockNumber *) palloc(maxFreePages * sizeof(BlockNumber));
  nFreePages = 0;

@@ -728,10 +730,10 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
  * after we start the scan will not be examined; this should be fine,
  * since they can't possibly be empty.)
  */
- for (blkno = BTREE_METAPAGE+1; blkno < num_pages; blkno++)
+ for (blkno = BTREE_METAPAGE + 1; blkno < num_pages; blkno++)
  {
- Buffer buf;
- Page page;
+ Buffer buf;
+ Page page;
  BTPageOpaque opaque;

  buf = _bt_getbuf(rel, blkno, BT_READ);
@@ -753,7 +755,7 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
  P_FIRSTDATAKEY(opaque) > PageGetMaxOffsetNumber(page))
  {
  /* Empty, try to delete */
- int ndel;
+ int ndel;

  /* Run pagedel in a temp context to avoid memory leakage */
  MemoryContextReset(mycontext);
@@ -768,7 +770,7 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
  /*
  * During VACUUM FULL it's okay to recycle deleted pages
  * immediately, since there can be no other transactions
- * scanning the index. Note that we will only recycle the
+ * scanning the index. Note that we will only recycle the
  * current page and not any parent pages that _bt_pagedel
  * might have recursed to; this seems reasonable in the name
  * of simplicity. (Trying to do otherwise would mean we'd
@@ -787,16 +789,16 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
  }

  /*
- * During VACUUM FULL, we truncate off any recyclable pages at the
- * end of the index. In a normal vacuum it'd be unsafe to do this
- * except by acquiring exclusive lock on the index and then rechecking
- * all the pages; doesn't seem worth it.
+ * During VACUUM FULL, we truncate off any recyclable pages at the end
+ * of the index. In a normal vacuum it'd be unsafe to do this except
+ * by acquiring exclusive lock on the index and then rechecking all
+ * the pages; doesn't seem worth it.
  */
  if (info->vacuum_full && nFreePages > 0)
  {
- BlockNumber new_pages = num_pages;
+ BlockNumber new_pages = num_pages;

- while (nFreePages > 0 && freePages[nFreePages-1] == new_pages-1)
+ while (nFreePages > 0 && freePages[nFreePages - 1] == new_pages - 1)
  {
  new_pages--;
  pages_deleted--;
@@ -810,9 +812,10 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
  * Okay to truncate.
  *
  * First, flush any shared buffers for the blocks we intend to
- * delete. FlushRelationBuffers is a bit more than we need for
- * this, since it will also write out dirty buffers for blocks we
- * aren't deleting, but it's the closest thing in bufmgr's API.
+ * delete. FlushRelationBuffers is a bit more than we need
+ * for this, since it will also write out dirty buffers for
+ * blocks we aren't deleting, but it's the closest thing in
+ * bufmgr's API.
  */
  i = FlushRelationBuffers(rel, new_pages);
  if (i < 0)
@@ -822,7 +825,8 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
  * Do the physical truncation.
  */
  new_pages = smgrtruncate(DEFAULT_SMGR, rel, new_pages);
- rel->rd_nblocks = new_pages; /* update relcache immediately */
+ rel->rd_nblocks = new_pages; /* update relcache
+ * immediately */
  rel->rd_targblock = InvalidBlockNumber;
  num_pages = new_pages;
  }
@@ -856,7 +860,7 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
  * and so no deletion can have occurred on that page.
  *
  * On entry, we have a pin but no read lock on the buffer that contained
- * the index tuple we stopped the scan on. On exit, we have pin and read
+ * the index tuple we stopped the scan on. On exit, we have pin and read
  * lock on the buffer that now contains that index tuple, and the scandesc's
  * current position is updated to point at it.
  */
@@ -877,8 +881,8 @@ _bt_restscan(IndexScanDesc scan)
  BlockNumber blkno;

  /*
- * Reacquire read lock on the buffer. (We should still have
- * a reference-count pin on it, so need not get that.)
+ * Reacquire read lock on the buffer. (We should still have a
+ * reference-count pin on it, so need not get that.)
  */
  LockBuffer(buf, BT_READ);

@@ -921,11 +925,11 @@ _bt_restscan(IndexScanDesc scan)

  /*
  * The item we're looking for moved right at least one page, so
- * move right. We are careful here to pin and read-lock the next
- * non-dead page before releasing the current one. This ensures that
- * a concurrent btbulkdelete scan cannot pass our position --- if it
- * did, it might be able to reach and delete our target item before
- * we can find it again.
+ * move right. We are careful here to pin and read-lock the next
+ * non-dead page before releasing the current one. This ensures
+ * that a concurrent btbulkdelete scan cannot pass our position
+ * --- if it did, it might be able to reach and delete our target
+ * item before we can find it again.
  */
  if (P_RIGHTMOST(opaque))
  elog(ERROR, "failed to re-find previous key in \"%s\"",
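Note: the btvacuumcleanup hunks above include the loop that decides how far
VACUUM FULL can truncate the index: trailing blocks are trimmed for as long
as the highest-numbered free page is exactly the last block of the relation.
A standalone simulation of just that loop (not PostgreSQL code; freePages[]
is assumed sorted ascending, as the block-number scan produces it):

    #include <stdio.h>

    int
    main(void)
    {
        unsigned    freePages[] = {3, 7, 8, 9}; /* free block numbers */
        int         nFreePages = 4;
        unsigned    new_pages = 10;             /* current length in blocks */

        while (nFreePages > 0 && freePages[nFreePages - 1] == new_pages - 1)
        {
            new_pages--;
            nFreePages--;
        }
        /* prints: truncate to 7 blocks; 1 free page left (block 3) */
        printf("truncate to %u blocks; %d free page left (block %u)\n",
               new_pages, nFreePages, freePages[0]);
        return 0;
    }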
@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.77 2003/07/29 22:18:38 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.78 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -64,8 +64,8 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,

/*
* Race -- the page we just grabbed may have split since we read
* its pointer in the parent (or metapage). If it has, we may need
* to move right to its new sibling. Do that.
* its pointer in the parent (or metapage). If it has, we may
* need to move right to its new sibling. Do that.
*/
*bufP = _bt_moveright(rel, *bufP, keysz, scankey, BT_READ);

@ -87,14 +87,14 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
par_blkno = BufferGetBlockNumber(*bufP);

/*
* We need to save the location of the index entry we chose in
* the parent page on a stack. In case we split the tree, we'll
* use the stack to work back up to the parent page. We also save
* the actual downlink (TID) to uniquely identify the index entry,
* in case it moves right while we're working lower in the
* tree. See the paper by Lehman and Yao for how this is detected
* and handled. (We use the child link to disambiguate duplicate
* keys in the index -- Lehman and Yao disallow duplicate keys.)
* We need to save the location of the index entry we chose in the
* parent page on a stack. In case we split the tree, we'll use
* the stack to work back up to the parent page. We also save the
* actual downlink (TID) to uniquely identify the index entry, in
* case it moves right while we're working lower in the tree. See
* the paper by Lehman and Yao for how this is detected and
* handled. (We use the child link to disambiguate duplicate keys
* in the index -- Lehman and Yao disallow duplicate keys.)
*/
new_stack = (BTStack) palloc(sizeof(BTStackData));
new_stack->bts_blkno = par_blkno;
@ -151,8 +151,8 @@ _bt_moveright(Relation rel,
* might not need to move right; have to scan the page first anyway.)
* It could even have split more than once, so scan as far as needed.
*
* We also have to move right if we followed a link that brought us to
* a dead page.
* We also have to move right if we followed a link that brought us to a
* dead page.
*/
while (!P_RIGHTMOST(opaque) &&
(P_IGNORE(opaque) ||
@ -599,8 +599,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/*
* At this point we are positioned at the first item >= scan key, or
* possibly at the end of a page on which all the existing items are
* less than the scan key and we know that everything on later
* pages is greater than or equal to scan key.
* less than the scan key and we know that everything on later pages
* is greater than or equal to scan key.
*
* We could step forward in the latter case, but that'd be a waste of
* time if we want to scan backwards. So, it's now time to examine
@ -851,7 +851,8 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
}
}
}
else /* backwards scan */
else
/* backwards scan */
{
if (offnum > P_FIRSTDATAKEY(opaque))
offnum = OffsetNumberPrev(offnum);
@ -860,9 +861,9 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
/*
* Walk left to the next page with data. This is much more
* complex than the walk-right case because of the possibility
* that the page to our left splits while we are in flight to it,
* plus the possibility that the page we were on gets deleted
* after we leave it. See nbtree/README for details.
* that the page to our left splits while we are in flight to
* it, plus the possibility that the page we were on gets
* deleted after we leave it. See nbtree/README for details.
*/
for (;;)
{
@ -877,10 +878,11 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
}
page = BufferGetPage(*bufP);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);

/*
* Okay, we managed to move left to a non-deleted page.
* Done if it's not half-dead and not empty. Else loop back
* and do it all again.
* Done if it's not half-dead and not empty. Else loop
* back and do it all again.
*/
if (!P_IGNORE(opaque))
{
@ -946,17 +948,18 @@ _bt_walk_left(Relation rel, Buffer buf)
buf = _bt_getbuf(rel, blkno, BT_READ);
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);

/*
* If this isn't the page we want, walk right till we find
* what we want --- but go no more than four hops (an
* arbitrary limit). If we don't find the correct page by then,
* the most likely bet is that the original page got deleted
* and isn't in the sibling chain at all anymore, not that its
* left sibling got split more than four times.
* If this isn't the page we want, walk right till we find what we
* want --- but go no more than four hops (an arbitrary limit).
* If we don't find the correct page by then, the most likely bet
* is that the original page got deleted and isn't in the sibling
* chain at all anymore, not that its left sibling got split more
* than four times.
*
* Note that it is correct to test P_ISDELETED not P_IGNORE
* here, because half-dead pages are still in the sibling
* chain. Caller must reject half-dead pages if wanted.
* Note that it is correct to test P_ISDELETED not P_IGNORE here,
* because half-dead pages are still in the sibling chain. Caller
* must reject half-dead pages if wanted.
*/
tries = 0;
for (;;)
@ -983,8 +986,8 @@ _bt_walk_left(Relation rel, Buffer buf)
if (P_ISDELETED(opaque))
{
/*
* It was deleted. Move right to first nondeleted page
* (there must be one); that is the page that has acquired the
* It was deleted. Move right to first nondeleted page (there
* must be one); that is the page that has acquired the
* deleted one's keyspace, so stepping left from it will take
* us where we want to be.
*/
@ -1001,18 +1004,18 @@ _bt_walk_left(Relation rel, Buffer buf)
if (!P_ISDELETED(opaque))
break;
}

/*
* Now return to top of loop, resetting obknum to
* point to this nondeleted page, and try again.
* Now return to top of loop, resetting obknum to point to
* this nondeleted page, and try again.
*/
}
else
{
/*
* It wasn't deleted; the explanation had better be
* that the page to the left got split or deleted.
* Without this check, we'd go into an infinite loop
* if there's anything wrong.
* It wasn't deleted; the explanation had better be that the
* page to the left got split or deleted. Without this check,
* we'd go into an infinite loop if there's anything wrong.
*/
if (opaque->btpo_prev == lblkno)
elog(ERROR, "could not find left sibling in \"%s\"",
@ -1028,7 +1031,7 @@ _bt_walk_left(Relation rel, Buffer buf)
* _bt_get_endpoint() -- Find the first or last page on a given tree level
*
* If the index is empty, we will return InvalidBuffer; any other failure
* condition causes ereport(). We will not return a dead page.
* condition causes ereport(). We will not return a dead page.
*
* The returned buffer is pinned and read-locked.
*/
@ -1045,8 +1048,8 @@ _bt_get_endpoint(Relation rel, uint32 level, bool rightmost)

/*
* If we are looking for a leaf page, okay to descend from fast root;
* otherwise better descend from true root. (There is no point in being
* smarter about intermediate levels.)
* otherwise better descend from true root. (There is no point in
* being smarter about intermediate levels.)
*/
if (level == 0)
buf = _bt_getroot(rel, BT_READ);
@ -1066,9 +1069,9 @@ _bt_get_endpoint(Relation rel, uint32 level, bool rightmost)
{
/*
* If we landed on a deleted page, step right to find a live page
* (there must be one). Also, if we want the rightmost page,
* step right if needed to get to it (this could happen if the
* page split since we obtained a pointer to it).
* (there must be one). Also, if we want the rightmost page, step
* right if needed to get to it (this could happen if the page
* split since we obtained a pointer to it).
*/
while (P_IGNORE(opaque) ||
(rightmost && !P_RIGHTMOST(opaque)))

@ -36,7 +36,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.73 2003/07/21 20:29:39 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.74 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -93,7 +93,7 @@ typedef struct BTPageState

static void _bt_blnewpage(Relation index, Buffer *buf, Page *page,
uint32 level);
uint32 level);
static BTPageState *_bt_pagestate(Relation index, uint32 level);
static void _bt_slideleft(Relation index, Buffer buf, Page page);
static void _bt_sortaddtup(Page page, Size itemsize,
@ -469,7 +469,7 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)

oopaque->btpo_next = BufferGetBlockNumber(nbuf);
nopaque->btpo_prev = BufferGetBlockNumber(obuf);
nopaque->btpo_next = P_NONE; /* redundant */
nopaque->btpo_next = P_NONE; /* redundant */
}

/*

@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.3 2003/02/23 22:43:08 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.4 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -29,10 +29,10 @@
typedef struct bt_incomplete_split
{
RelFileNode node; /* the index */
BlockNumber leftblk; /* left half of split */
BlockNumber rightblk; /* right half of split */
BlockNumber leftblk; /* left half of split */
BlockNumber rightblk; /* right half of split */
bool is_root; /* we split the root */
} bt_incomplete_split;
} bt_incomplete_split;

static List *incomplete_splits;

@ -107,7 +107,7 @@ _bt_restore_page(Page page, char *from, int len)
}

static void
_bt_restore_meta(Relation reln, XLogRecPtr lsn,
_bt_restore_meta(Relation reln, XLogRecPtr lsn,
BlockNumber root, uint32 level,
BlockNumber fastroot, uint32 fastlevel)
{
@ -172,7 +172,7 @@ btree_xlog_insert(bool redo, bool isleaf, bool ismeta,
if (!redo || !(record->xl_info & XLR_BKP_BLOCK_1))
{
buffer = XLogReadBuffer(false, reln,
ItemPointerGetBlockNumber(&(xlrec->target.tid)));
ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
elog(PANIC, "btree_insert_%sdo: block unfound", (redo) ? "re" : "un");
page = (Page) BufferGetPage(buffer);
@ -183,13 +183,11 @@ btree_xlog_insert(bool redo, bool isleaf, bool ismeta,
if (redo)
{
if (XLByteLE(lsn, PageGetLSN(page)))
{
UnlockAndReleaseBuffer(buffer);
}
else
{
if (PageAddItem(page, (Item) datapos, datalen,
ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
LP_USED) == InvalidOffsetNumber)
elog(PANIC, "btree_insert_redo: failed to add item");

@ -204,13 +202,9 @@ btree_xlog_insert(bool redo, bool isleaf, bool ismeta,
elog(PANIC, "btree_insert_undo: bad page LSN");

if (!P_ISLEAF(pageop))
{
UnlockAndReleaseBuffer(buffer);
}
else
{
elog(PANIC, "btree_insert_undo: unimplemented");
}
}
}

@ -226,8 +220,8 @@ btree_xlog_insert(bool redo, bool isleaf, bool ismeta,
if (redo && !isleaf && incomplete_splits != NIL)
{
forget_matching_split(reln, xlrec->target.node,
ItemPointerGetBlockNumber(&(xlrec->target.tid)),
ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
ItemPointerGetBlockNumber(&(xlrec->target.tid)),
ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
false);
}
}
@ -238,9 +232,9 @@ btree_xlog_split(bool redo, bool onleft, bool isroot,
{
xl_btree_split *xlrec = (xl_btree_split *) XLogRecGetData(record);
Relation reln;
BlockNumber targetblk;
BlockNumber leftsib;
BlockNumber rightsib;
BlockNumber targetblk;
BlockNumber leftsib;
BlockNumber rightsib;
Buffer buffer;
Page page;
BTPageOpaque pageop;
@ -338,9 +332,7 @@ btree_xlog_split(bool redo, bool onleft, bool isroot,
elog(PANIC, "btree_split_redo: uninitialized next right page");

if (XLByteLE(lsn, PageGetLSN(page)))
{
UnlockAndReleaseBuffer(buffer);
}
else
{
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
@ -357,8 +349,8 @@ btree_xlog_split(bool redo, bool onleft, bool isroot,
if (redo && xlrec->level > 0 && incomplete_splits != NIL)
{
forget_matching_split(reln, xlrec->target.node,
ItemPointerGetBlockNumber(&(xlrec->target.tid)),
ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
ItemPointerGetBlockNumber(&(xlrec->target.tid)),
ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
false);
}

@ -422,10 +414,10 @@ btree_xlog_delete_page(bool redo, bool ismeta,
{
xl_btree_delete_page *xlrec = (xl_btree_delete_page *) XLogRecGetData(record);
Relation reln;
BlockNumber parent;
BlockNumber target;
BlockNumber leftsib;
BlockNumber rightsib;
BlockNumber parent;
BlockNumber target;
BlockNumber leftsib;
BlockNumber rightsib;
Buffer buffer;
Page page;
BTPageOpaque pageop;
@ -451,9 +443,7 @@ btree_xlog_delete_page(bool redo, bool ismeta,
if (PageIsNew((PageHeader) page))
elog(PANIC, "btree_delete_page_redo: uninitialized parent page");
if (XLByteLE(lsn, PageGetLSN(page)))
{
UnlockAndReleaseBuffer(buffer);
}
else
{
OffsetNumber poffset;
@ -469,7 +459,7 @@ btree_xlog_delete_page(bool redo, bool ismeta,
{
ItemId itemid;
BTItem btitem;
OffsetNumber nextoffset;
OffsetNumber nextoffset;

itemid = PageGetItemId(page, poffset);
btitem = (BTItem) PageGetItem(page, itemid);
@ -494,9 +484,7 @@ btree_xlog_delete_page(bool redo, bool ismeta,
if (PageIsNew((PageHeader) page))
elog(PANIC, "btree_delete_page_redo: uninitialized right sibling");
if (XLByteLE(lsn, PageGetLSN(page)))
{
UnlockAndReleaseBuffer(buffer);
}
else
{
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
@ -520,9 +508,7 @@ btree_xlog_delete_page(bool redo, bool ismeta,
if (PageIsNew((PageHeader) page))
elog(PANIC, "btree_delete_page_redo: uninitialized left sibling");
if (XLByteLE(lsn, PageGetLSN(page)))
{
UnlockAndReleaseBuffer(buffer);
}
else
{
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
@ -799,116 +785,116 @@ btree_desc(char *buf, uint8 xl_info, char *rec)
switch (info)
{
case XLOG_BTREE_INSERT_LEAF:
{
xl_btree_insert *xlrec = (xl_btree_insert *) rec;
{
xl_btree_insert *xlrec = (xl_btree_insert *) rec;

strcat(buf, "insert: ");
out_target(buf, &(xlrec->target));
break;
}
strcat(buf, "insert: ");
out_target(buf, &(xlrec->target));
break;
}
case XLOG_BTREE_INSERT_UPPER:
{
xl_btree_insert *xlrec = (xl_btree_insert *) rec;
{
xl_btree_insert *xlrec = (xl_btree_insert *) rec;

strcat(buf, "insert_upper: ");
out_target(buf, &(xlrec->target));
break;
}
strcat(buf, "insert_upper: ");
out_target(buf, &(xlrec->target));
break;
}
case XLOG_BTREE_INSERT_META:
{
xl_btree_insert *xlrec = (xl_btree_insert *) rec;
{
xl_btree_insert *xlrec = (xl_btree_insert *) rec;

strcat(buf, "insert_meta: ");
out_target(buf, &(xlrec->target));
break;
}
strcat(buf, "insert_meta: ");
out_target(buf, &(xlrec->target));
break;
}
case XLOG_BTREE_SPLIT_L:
{
xl_btree_split *xlrec = (xl_btree_split *) rec;
{
xl_btree_split *xlrec = (xl_btree_split *) rec;

strcat(buf, "split_l: ");
out_target(buf, &(xlrec->target));
sprintf(buf + strlen(buf), "; oth %u; rgh %u",
xlrec->otherblk, xlrec->rightblk);
break;
}
strcat(buf, "split_l: ");
out_target(buf, &(xlrec->target));
sprintf(buf + strlen(buf), "; oth %u; rgh %u",
xlrec->otherblk, xlrec->rightblk);
break;
}
case XLOG_BTREE_SPLIT_R:
{
xl_btree_split *xlrec = (xl_btree_split *) rec;
{
xl_btree_split *xlrec = (xl_btree_split *) rec;

strcat(buf, "split_r: ");
out_target(buf, &(xlrec->target));
sprintf(buf + strlen(buf), "; oth %u; rgh %u",
xlrec->otherblk, xlrec->rightblk);
break;
}
strcat(buf, "split_r: ");
out_target(buf, &(xlrec->target));
sprintf(buf + strlen(buf), "; oth %u; rgh %u",
xlrec->otherblk, xlrec->rightblk);
break;
}
case XLOG_BTREE_SPLIT_L_ROOT:
{
xl_btree_split *xlrec = (xl_btree_split *) rec;
{
xl_btree_split *xlrec = (xl_btree_split *) rec;

strcat(buf, "split_l_root: ");
out_target(buf, &(xlrec->target));
sprintf(buf + strlen(buf), "; oth %u; rgh %u",
xlrec->otherblk, xlrec->rightblk);
break;
}
strcat(buf, "split_l_root: ");
out_target(buf, &(xlrec->target));
sprintf(buf + strlen(buf), "; oth %u; rgh %u",
xlrec->otherblk, xlrec->rightblk);
break;
}
case XLOG_BTREE_SPLIT_R_ROOT:
{
xl_btree_split *xlrec = (xl_btree_split *) rec;
{
xl_btree_split *xlrec = (xl_btree_split *) rec;

strcat(buf, "split_r_root: ");
out_target(buf, &(xlrec->target));
sprintf(buf + strlen(buf), "; oth %u; rgh %u",
xlrec->otherblk, xlrec->rightblk);
break;
}
strcat(buf, "split_r_root: ");
out_target(buf, &(xlrec->target));
sprintf(buf + strlen(buf), "; oth %u; rgh %u",
xlrec->otherblk, xlrec->rightblk);
break;
}
case XLOG_BTREE_DELETE:
{
xl_btree_delete *xlrec = (xl_btree_delete *) rec;
{
xl_btree_delete *xlrec = (xl_btree_delete *) rec;

sprintf(buf + strlen(buf), "delete: node %u/%u; blk %u",
xlrec->node.tblNode, xlrec->node.relNode, xlrec->block);
break;
}
sprintf(buf + strlen(buf), "delete: node %u/%u; blk %u",
xlrec->node.tblNode, xlrec->node.relNode, xlrec->block);
break;
}
case XLOG_BTREE_DELETE_PAGE:
case XLOG_BTREE_DELETE_PAGE_META:
{
xl_btree_delete_page *xlrec = (xl_btree_delete_page *) rec;
{
xl_btree_delete_page *xlrec = (xl_btree_delete_page *) rec;

strcat(buf, "delete_page: ");
out_target(buf, &(xlrec->target));
sprintf(buf + strlen(buf), "; dead %u; left %u; right %u",
xlrec->deadblk, xlrec->leftblk, xlrec->rightblk);
break;
}
strcat(buf, "delete_page: ");
out_target(buf, &(xlrec->target));
sprintf(buf + strlen(buf), "; dead %u; left %u; right %u",
xlrec->deadblk, xlrec->leftblk, xlrec->rightblk);
break;
}
case XLOG_BTREE_NEWROOT:
{
xl_btree_newroot *xlrec = (xl_btree_newroot *) rec;
{
xl_btree_newroot *xlrec = (xl_btree_newroot *) rec;

sprintf(buf + strlen(buf), "newroot: node %u/%u; root %u lev %u",
xlrec->node.tblNode, xlrec->node.relNode,
xlrec->rootblk, xlrec->level);
break;
}
sprintf(buf + strlen(buf), "newroot: node %u/%u; root %u lev %u",
xlrec->node.tblNode, xlrec->node.relNode,
xlrec->rootblk, xlrec->level);
break;
}
case XLOG_BTREE_NEWMETA:
{
xl_btree_newmeta *xlrec = (xl_btree_newmeta *) rec;
{
xl_btree_newmeta *xlrec = (xl_btree_newmeta *) rec;

sprintf(buf + strlen(buf), "newmeta: node %u/%u; root %u lev %u fast %u lev %u",
xlrec->node.tblNode, xlrec->node.relNode,
xlrec->meta.root, xlrec->meta.level,
xlrec->meta.fastroot, xlrec->meta.fastlevel);
break;
}
sprintf(buf + strlen(buf), "newmeta: node %u/%u; root %u lev %u fast %u lev %u",
xlrec->node.tblNode, xlrec->node.relNode,
xlrec->meta.root, xlrec->meta.level,
xlrec->meta.fastroot, xlrec->meta.fastlevel);
break;
}
case XLOG_BTREE_NEWPAGE:
{
xl_btree_newpage *xlrec = (xl_btree_newpage *) rec;
{
xl_btree_newpage *xlrec = (xl_btree_newpage *) rec;

sprintf(buf + strlen(buf), "newpage: node %u/%u; page %u",
xlrec->node.tblNode, xlrec->node.relNode,
xlrec->blkno);
break;
}
sprintf(buf + strlen(buf), "newpage: node %u/%u; page %u",
xlrec->node.tblNode, xlrec->node.relNode,
xlrec->blkno);
break;
}
default:
strcat(buf, "UNKNOWN");
break;

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.45 2003/07/28 00:09:14 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.46 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -109,10 +109,10 @@ rtrescan(PG_FUNCTION_ARGS)
s->numberOfKeys * sizeof(ScanKeyData));

/*
* Scans on internal pages use different operators than they
* do on leaf pages. For example, if the user wants all boxes
* that exactly match (x1,y1,x2,y2), then on internal pages we
* need to find all boxes that contain (x1,y1,x2,y2).
* Scans on internal pages use different operators than they do on
* leaf pages. For example, if the user wants all boxes that
* exactly match (x1,y1,x2,y2), then on internal pages we need to
* find all boxes that contain (x1,y1,x2,y2).
*/
for (i = 0; i < s->numberOfKeys; i++)
{

@ -13,7 +13,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/access/transam/clog.c,v 1.16 2003/06/11 22:37:45 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/clog.c,v 1.17 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -73,7 +73,7 @@

static SlruCtlData ClogCtlData;
static SlruCtl ClogCtl = &ClogCtlData;

static int ZeroCLOGPage(int pageno, bool writeXlog);
static bool CLOGPagePrecedes(int page1, int page2);

@ -3,7 +3,7 @@
*
* Resource managers definition
*
* $Header: /cvsroot/pgsql/src/backend/access/transam/rmgr.c,v 1.10 2003/02/21 00:06:22 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/rmgr.c,v 1.11 2003/08/04 00:43:15 momjian Exp $
*/
#include "postgres.h"

@ -19,7 +19,7 @@
#include "commands/sequence.h"

RmgrData RmgrTable[RM_MAX_ID+1] = {
RmgrData RmgrTable[RM_MAX_ID + 1] = {
{"XLOG", xlog_redo, xlog_undo, xlog_desc, NULL, NULL},
{"Transaction", xact_redo, xact_undo, xact_desc, NULL, NULL},
{"Storage", smgr_redo, smgr_undo, smgr_desc, NULL, NULL},
@ -32,7 +32,7 @@ RmgrData RmgrTable[RM_MAX_ID+1] = {
{"Reserved 9", NULL, NULL, NULL, NULL, NULL},
{"Heap", heap_redo, heap_undo, heap_desc, NULL, NULL},
{"Btree", btree_redo, btree_undo, btree_desc,
btree_xlog_startup, btree_xlog_cleanup},
btree_xlog_startup, btree_xlog_cleanup},
{"Hash", hash_redo, hash_undo, hash_desc, NULL, NULL},
{"Rtree", rtree_redo, rtree_undo, rtree_desc, NULL, NULL},
{"Gist", gist_redo, gist_undo, gist_desc, NULL, NULL},

@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2003, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/access/transam/slru.c,v 1.3 2003/07/28 00:09:14 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/slru.c,v 1.4 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -93,7 +93,7 @@ typedef enum
SLRU_PAGE_CLEAN, /* page is valid and not dirty */
SLRU_PAGE_DIRTY, /* page is valid but needs write */
SLRU_PAGE_WRITE_IN_PROGRESS /* page is being written out */
} SlruPageStatus;
} SlruPageStatus;

/*
* Shared-memory state
@ -117,7 +117,7 @@ typedef struct SlruSharedData
* swapping out the latest page.
*/
int latest_page_number;
} SlruSharedData;
} SlruSharedData;
typedef SlruSharedData *SlruShared;

@ -145,7 +145,7 @@ typedef enum
SLRU_SEEK_FAILED,
SLRU_READ_FAILED,
SLRU_WRITE_FAILED
} SlruErrorCause;
} SlruErrorCause;
static SlruErrorCause slru_errcause;
static int slru_errno;

@ -166,9 +166,9 @@ SimpleLruShmemSize(void)
{
return MAXALIGN(sizeof(SlruSharedData)) + BLCKSZ * NUM_CLOG_BUFFERS
#ifdef EXEC_BACKEND
+ MAXALIGN(sizeof(SlruLockData))
+ MAXALIGN(sizeof(SlruLockData))
#endif
;
;
}

void
@ -183,12 +183,14 @@ SimpleLruInit(SlruCtl ctl, const char *name, const char *subdir)
shared = (SlruShared) ptr;

#ifdef EXEC_BACKEND

/*
* Locks are in shared memory
*/
locks = (SlruLock)(ptr + MAXALIGN(sizeof(SlruSharedData)) +
BLCKSZ * NUM_CLOG_BUFFERS);
locks = (SlruLock) (ptr + MAXALIGN(sizeof(SlruSharedData)) +
BLCKSZ * NUM_CLOG_BUFFERS);
#else

/*
* Locks are in private memory
*/
@ -199,7 +201,7 @@ SimpleLruInit(SlruCtl ctl, const char *name, const char *subdir)

if (!IsUnderPostmaster)
/* Initialize locks and shared memory area */
/* Initialize locks and shared memory area */
{
char *bufptr;
int slotno;
@ -210,8 +212,8 @@ SimpleLruInit(SlruCtl ctl, const char *name, const char *subdir)

memset(shared, 0, sizeof(SlruSharedData));

bufptr = (char *)shared + MAXALIGN(sizeof(SlruSharedData));

bufptr = (char *) shared + MAXALIGN(sizeof(SlruSharedData));

for (slotno = 0; slotno < NUM_CLOG_BUFFERS; slotno++)
{
locks->BufferLocks[slotno] = LWLockAssign();
@ -247,7 +249,7 @@ int
SimpleLruZeroPage(SlruCtl ctl, int pageno)
{
int slotno;
SlruShared shared = (SlruShared) ctl->shared;
SlruShared shared = (SlruShared) ctl->shared;

/* Find a suitable buffer slot for the page */
slotno = SlruSelectLRUPage(ctl, pageno);
@ -285,7 +287,7 @@ SimpleLruZeroPage(SlruCtl ctl, int pageno)
char *
SimpleLruReadPage(SlruCtl ctl, int pageno, TransactionId xid, bool forwrite)
{
SlruShared shared = (SlruShared) ctl->shared;
SlruShared shared = (SlruShared) ctl->shared;

/* Outer loop handles restart if we lose the buffer to someone else */
for (;;)
@ -383,7 +385,7 @@ SimpleLruWritePage(SlruCtl ctl, int slotno)
{
int pageno;
bool ok;
SlruShared shared = (SlruShared) ctl->shared;
SlruShared shared = (SlruShared) ctl->shared;

/* Do nothing if page does not need writing */
if (shared->page_status[slotno] != SLRU_PAGE_DIRTY &&
@ -539,13 +541,13 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno)
* possible for this to need to happen when writing a page that's not
* first in its segment; we assume the OS can cope with that. (Note:
* it might seem that it'd be okay to create files only when
* SimpleLruZeroPage is called for the first page of a segment. However,
* if after a crash and restart the REDO logic elects to replay the
* log from a checkpoint before the latest one, then it's possible
* that we will get commands to set transaction status of transactions
* that have already been truncated from the commit log. Easiest way
* to deal with that is to accept references to nonexistent files here
* and in SlruPhysicalReadPage.)
* SimpleLruZeroPage is called for the first page of a segment.
* However, if after a crash and restart the REDO logic elects to
* replay the log from a checkpoint before the latest one, then it's
* possible that we will get commands to set transaction status of
* transactions that have already been truncated from the commit log.
* Easiest way to deal with that is to accept references to
* nonexistent files here and in SlruPhysicalReadPage.)
*/
fd = BasicOpenFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
if (fd < 0)
@ -608,37 +610,37 @@ SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid)
case SLRU_OPEN_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not access status of transaction %u", xid),
errmsg("could not access status of transaction %u", xid),
errdetail("open of file \"%s\" failed: %m",
path)));
break;
case SLRU_CREATE_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not access status of transaction %u", xid),
errmsg("could not access status of transaction %u", xid),
errdetail("creation of file \"%s\" failed: %m",
path)));
break;
case SLRU_SEEK_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not access status of transaction %u", xid),
errdetail("lseek of file \"%s\", offset %u failed: %m",
path, offset)));
errmsg("could not access status of transaction %u", xid),
errdetail("lseek of file \"%s\", offset %u failed: %m",
path, offset)));
break;
case SLRU_READ_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not access status of transaction %u", xid),
errdetail("read of file \"%s\", offset %u failed: %m",
path, offset)));
errmsg("could not access status of transaction %u", xid),
errdetail("read of file \"%s\", offset %u failed: %m",
path, offset)));
break;
case SLRU_WRITE_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not access status of transaction %u", xid),
errdetail("write of file \"%s\", offset %u failed: %m",
path, offset)));
errmsg("could not access status of transaction %u", xid),
errdetail("write of file \"%s\", offset %u failed: %m",
path, offset)));
break;
default:
/* can't get here, we trust */
@ -665,6 +667,7 @@ static int
SlruSelectLRUPage(SlruCtl ctl, int pageno)
{
SlruShared shared = (SlruShared) ctl->shared;

/* Outer loop handles restart after I/O */
for (;;)
{
@ -689,7 +692,7 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
return slotno;
if (shared->page_lru_count[slotno] > bestcount &&
shared->page_number[slotno] != shared->latest_page_number)
shared->page_number[slotno] != shared->latest_page_number)
{
bestslot = slotno;
bestcount = shared->page_lru_count[slotno];
@ -705,12 +708,12 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
/*
* We need to do I/O. Normal case is that we have to write it
* out, but it's possible in the worst case to have selected a
* read-busy page. In that case we use SimpleLruReadPage to wait for
* the read to complete.
* read-busy page. In that case we use SimpleLruReadPage to wait
* for the read to complete.
*/
if (shared->page_status[bestslot] == SLRU_PAGE_READ_IN_PROGRESS)
(void) SimpleLruReadPage(ctl, shared->page_number[bestslot],
InvalidTransactionId, false);
InvalidTransactionId, false);
else
SimpleLruWritePage(ctl, bestslot);

@ -747,10 +750,11 @@ SimpleLruFlush(SlruCtl ctl, bool checkpoint)
for (slotno = 0; slotno < NUM_CLOG_BUFFERS; slotno++)
{
SimpleLruWritePage(ctl, slotno);

/*
* When called during a checkpoint,
* we cannot assert that the slot is clean now, since another
* process might have re-dirtied it already. That's okay.
* When called during a checkpoint, we cannot assert that the slot
* is clean now, since another process might have re-dirtied it
* already. That's okay.
*/
Assert(checkpoint ||
shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
@ -792,10 +796,10 @@ SimpleLruTruncate(SlruCtl ctl, int cutoffPage)
CreateCheckPoint(false, true);

/*
* Scan shared memory and remove any pages preceding the cutoff
* page, to ensure we won't rewrite them later. (Any dirty pages
* should have been flushed already during the checkpoint, we're just
* being extra careful here.)
* Scan shared memory and remove any pages preceding the cutoff page,
* to ensure we won't rewrite them later. (Any dirty pages should
* have been flushed already during the checkpoint, we're just being
* extra careful here.)
*/
LWLockAcquire(ctl->locks->ControlLock, LW_EXCLUSIVE);

@ -870,7 +874,7 @@ SlruScanDirectory(SlruCtl ctl, int cutoffPage, bool doDeletions)
if (cldir == NULL)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not open directory \"%s\": %m", ctl->Dir)));
errmsg("could not open directory \"%s\": %m", ctl->Dir)));

errno = 0;
while ((clde = readdir(cldir)) != NULL)
@ -898,7 +902,7 @@ SlruScanDirectory(SlruCtl ctl, int cutoffPage, bool doDeletions)
if (errno)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not read directory \"%s\": %m", ctl->Dir)));
errmsg("could not read directory \"%s\": %m", ctl->Dir)));
closedir(cldir);

return found;

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.149 2003/07/21 20:29:39 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.150 2003/08/04 00:43:15 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
@ -92,7 +92,7 @@
* AbortTransactionBlock
*
* These are invoked only in response to a user "BEGIN WORK", "COMMIT",
* or "ROLLBACK" command. The tricky part about these functions
* or "ROLLBACK" command. The tricky part about these functions
* is that they are called within the postgres main loop, in between
* the StartTransactionCommand() and CommitTransactionCommand().
*
@ -197,8 +197,8 @@ static TransactionStateData CurrentTransactionStateData = {
0, /* scan command id */
0x0, /* start time */
TRANS_DEFAULT, /* transaction state */
TBLOCK_DEFAULT /* transaction block state from
the client perspective */
TBLOCK_DEFAULT /* transaction block state from the client
* perspective */
};

TransactionState CurrentTransactionState = &CurrentTransactionStateData;
@ -359,7 +359,7 @@ GetCurrentTransactionStartTimeUsec(int *msec)
* TransactionIdIsCurrentTransactionId
*
* During bootstrap, we cheat and say "it's not my transaction ID" even though
* it is. Along with transam.c's cheat to say that the bootstrap XID is
* it is. Along with transam.c's cheat to say that the bootstrap XID is
* already committed, this causes the tqual.c routines to see previously
* inserted tuples as committed, which is what we need during bootstrap.
*/
@ -561,13 +561,13 @@ RecordTransactionCommit(void)

/*
* We must mark the transaction committed in clog if its XID
* appears either in permanent rels or in local temporary rels.
* We test this by seeing if we made transaction-controlled
* entries *OR* local-rel tuple updates. Note that if we made
* only the latter, we have not emitted an XLOG record for our
* commit, and so in the event of a crash the clog update might be
* lost. This is okay because no one else will ever care whether
* we committed.
* appears either in permanent rels or in local temporary rels. We
* test this by seeing if we made transaction-controlled entries
* *OR* local-rel tuple updates. Note that if we made only the
* latter, we have not emitted an XLOG record for our commit, and
* so in the event of a crash the clog update might be lost. This
* is okay because no one else will ever care whether we
* committed.
*/
if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate)
TransactionIdCommit(xid);
@ -755,9 +755,9 @@ AtAbort_Memory(void)
{
/*
* Make sure we are in a valid context (not a child of
* TopTransactionContext...). Note that it is possible for this
* code to be called when we aren't in a transaction at all; go
* directly to TopMemoryContext in that case.
* TopTransactionContext...). Note that it is possible for this code
* to be called when we aren't in a transaction at all; go directly to
* TopMemoryContext in that case.
*/
if (TopTransactionContext != NULL)
{
@ -891,8 +891,8 @@ CommitTransaction(void)
DeferredTriggerEndXact();

/*
* Similarly, let ON COMMIT management do its thing before we start
* to commit.
* Similarly, let ON COMMIT management do its thing before we start to
* commit.
*/
PreCommit_on_commit_actions();

@ -953,10 +953,10 @@ CommitTransaction(void)
* noncritical resource releasing.
*
* The ordering of operations is not entirely random. The idea is:
* release resources visible to other backends (eg, files, buffer pins);
* then release locks; then release backend-local resources. We want
* to release locks at the point where any backend waiting for us will
* see our transaction as being fully cleaned up.
* release resources visible to other backends (eg, files, buffer
* pins); then release locks; then release backend-local resources.
* We want to release locks at the point where any backend waiting for
* us will see our transaction as being fully cleaned up.
*/

smgrDoPendingDeletes(true);
@ -1064,7 +1064,7 @@ AbortTransaction(void)
}

/*
* Post-abort cleanup. See notes in CommitTransaction() concerning
* Post-abort cleanup. See notes in CommitTransaction() concerning
* ordering.
*/

@ -1194,8 +1194,8 @@ StartTransactionCommand(void)
}

/*
* We must switch to TopTransactionContext before returning. This
* is already done if we called StartTransaction, otherwise not.
* We must switch to TopTransactionContext before returning. This is
* already done if we called StartTransaction, otherwise not.
*/
Assert(TopTransactionContext != NULL);
MemoryContextSwitchTo(TopTransactionContext);
@ -1370,9 +1370,10 @@ PreventTransactionChain(void *stmtNode, const char *stmtType)
if (IsTransactionBlock())
ereport(ERROR,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
/* translator: %s represents an SQL statement name */
/* translator: %s represents an SQL statement name */
errmsg("%s cannot run inside a transaction block",
stmtType)));

/*
* Are we inside a function call? If the statement's parameter block
* was allocated in QueryContext, assume it is an interactive command.
@ -1381,8 +1382,8 @@ PreventTransactionChain(void *stmtNode, const char *stmtType)
if (!MemoryContextContains(QueryContext, stmtNode))
ereport(ERROR,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
/* translator: %s represents an SQL statement name */
errmsg("%s cannot be executed from a function", stmtType)));
/* translator: %s represents an SQL statement name */
errmsg("%s cannot be executed from a function", stmtType)));
/* If we got past IsTransactionBlock test, should be in default state */
if (CurrentTransactionState->blockState != TBLOCK_DEFAULT)
elog(ERROR, "cannot prevent transaction chain");
@ -1414,6 +1415,7 @@ RequireTransactionChain(void *stmtNode, const char *stmtType)
*/
if (IsTransactionBlock())
return;

/*
* Are we inside a function call? If the statement's parameter block
* was allocated in QueryContext, assume it is an interactive command.
@ -1423,7 +1425,7 @@ RequireTransactionChain(void *stmtNode, const char *stmtType)
return;
ereport(ERROR,
(errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
/* translator: %s represents an SQL statement name */
/* translator: %s represents an SQL statement name */
errmsg("%s may only be used in BEGIN/END transaction blocks",
stmtType)));
}

@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.120 2003/07/28 00:09:14 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.121 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -1046,8 +1046,8 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (close(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
errmsg("close of log file %u, segment %u failed: %m",
openLogId, openLogSeg)));
errmsg("close of log file %u, segment %u failed: %m",
openLogId, openLogSeg)));
openLogFile = -1;
}
XLByteToPrevSeg(LogwrtResult.Write, openLogId, openLogSeg);
@ -1162,8 +1162,8 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (close(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
errmsg("close of log file %u, segment %u failed: %m",
openLogId, openLogSeg)));
errmsg("close of log file %u, segment %u failed: %m",
openLogId, openLogSeg)));
openLogFile = -1;
}
if (openLogFile < 0)
@ -1266,7 +1266,7 @@ XLogFlush(XLogRecPtr record)
XLogCtlInsert *Insert = &XLogCtl->Insert;
uint32 freespace = INSERT_FREESPACE(Insert);

if (freespace < SizeOfXLogRecord) /* buffer is full */
if (freespace < SizeOfXLogRecord) /* buffer is full */
WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx];
else
{
@ -1449,8 +1449,8 @@ XLogFileInit(uint32 log, uint32 seg,
if (fd < 0)
ereport(PANIC,
(errcode_for_file_access(),
errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
path, log, seg)));
errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
path, log, seg)));

return (fd);
}
@ -1563,14 +1563,14 @@ XLogFileOpen(uint32 log, uint32 seg, bool econt)
{
ereport(LOG,
(errcode_for_file_access(),
errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
path, log, seg)));
errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
path, log, seg)));
return (fd);
}
ereport(PANIC,
(errcode_for_file_access(),
errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
path, log, seg)));
errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
path, log, seg)));
}

return (fd);
@ -1621,8 +1621,8 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
if (xldir == NULL)
ereport(PANIC,
(errcode_for_file_access(),
errmsg("could not open transaction log directory \"%s\": %m",
XLogDir)));
errmsg("could not open transaction log directory \"%s\": %m",
XLogDir)));

sprintf(lastoff, "%08X%08X", log, seg);

@ -1654,15 +1654,15 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
true))
{
ereport(LOG,
(errmsg("recycled transaction log file \"%s\"",
xlde->d_name)));
(errmsg("recycled transaction log file \"%s\"",
xlde->d_name)));
}
else
{
/* No need for any more future segments... */
ereport(LOG,
(errmsg("removing transaction log file \"%s\"",
xlde->d_name)));
(errmsg("removing transaction log file \"%s\"",
xlde->d_name)));
unlink(path);
}
}
@ -1672,8 +1672,8 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
if (errno)
ereport(PANIC,
(errcode_for_file_access(),
errmsg("could not read transaction log directory \"%s\": %m",
XLogDir)));
errmsg("could not read transaction log directory \"%s\": %m",
XLogDir)));
closedir(xldir);
}

@ -1746,8 +1746,8 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
if (!EQ_CRC64(record->xl_crc, crc))
{
ereport(emode,
(errmsg("bad resource manager data checksum in record at %X/%X",
recptr.xlogid, recptr.xrecoff)));
(errmsg("bad resource manager data checksum in record at %X/%X",
recptr.xlogid, recptr.xrecoff)));
return (false);
}

@ -1769,8 +1769,8 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
if (!EQ_CRC64(cbuf, crc))
{
ereport(emode,
(errmsg("bad checksum of backup block %d in record at %X/%X",
i + 1, recptr.xlogid, recptr.xrecoff)));
(errmsg("bad checksum of backup block %d in record at %X/%X",
i + 1, recptr.xlogid, recptr.xrecoff)));
return (false);
}
blk += sizeof(BkpBlock) + BLCKSZ;
@ -1931,7 +1931,7 @@ got_record:;
{
ereport(emode,
(errmsg("invalid resource manager id %u at %X/%X",
record->xl_rmid, RecPtr->xlogid, RecPtr->xrecoff)));
record->xl_rmid, RecPtr->xlogid, RecPtr->xrecoff)));
goto next_record_is_invalid;
}
nextRecord = NULL;
@ -2063,7 +2063,7 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
{
ereport(emode,
(errmsg("unexpected pageaddr %X/%X in log file %u, segment %u, offset %u",
hdr->xlp_pageaddr.xlogid, hdr->xlp_pageaddr.xrecoff,
hdr->xlp_pageaddr.xlogid, hdr->xlp_pageaddr.xrecoff,
readId, readSeg, readOff)));
return false;
}
@ -2084,7 +2084,7 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
hdr->xlp_sui > lastReadSUI + 512)
{
ereport(emode,
/* translator: SUI = startup id */
/* translator: SUI = startup id */
(errmsg("out-of-sequence SUI %u (after %u) in log file %u, segment %u, offset %u",
hdr->xlp_sui, lastReadSUI,
readId, readSeg, readOff)));
@ -2235,8 +2235,8 @@ ReadControlFile(void)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with PG_CONTROL_VERSION %d,"
" but the server was compiled with PG_CONTROL_VERSION %d.",
ControlFile->pg_control_version, PG_CONTROL_VERSION),
" but the server was compiled with PG_CONTROL_VERSION %d.",
ControlFile->pg_control_version, PG_CONTROL_VERSION),
errhint("It looks like you need to initdb.")));
/* Now check the CRC. */
INIT_CRC64(crc);
@ -2265,75 +2265,75 @@ ReadControlFile(void)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with CATALOG_VERSION_NO %d,"
" but the server was compiled with CATALOG_VERSION_NO %d.",
ControlFile->catalog_version_no, CATALOG_VERSION_NO),
" but the server was compiled with CATALOG_VERSION_NO %d.",
ControlFile->catalog_version_no, CATALOG_VERSION_NO),
errhint("It looks like you need to initdb.")));
if (ControlFile->blcksz != BLCKSZ)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with BLCKSZ %d,"
" but the server was compiled with BLCKSZ %d.",
ControlFile->blcksz, BLCKSZ),
errhint("It looks like you need to recompile or initdb.")));
errdetail("The database cluster was initialized with BLCKSZ %d,"
" but the server was compiled with BLCKSZ %d.",
ControlFile->blcksz, BLCKSZ),
errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->relseg_size != RELSEG_SIZE)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with RELSEG_SIZE %d,"
" but the server was compiled with RELSEG_SIZE %d.",
" but the server was compiled with RELSEG_SIZE %d.",
ControlFile->relseg_size, RELSEG_SIZE),
errhint("It looks like you need to recompile or initdb.")));
errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->nameDataLen != NAMEDATALEN)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with NAMEDATALEN %d,"
" but the server was compiled with NAMEDATALEN %d.",
" but the server was compiled with NAMEDATALEN %d.",
ControlFile->nameDataLen, NAMEDATALEN),
errhint("It looks like you need to recompile or initdb.")));
errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->funcMaxArgs != FUNC_MAX_ARGS)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with FUNC_MAX_ARGS %d,"
" but the server was compiled with FUNC_MAX_ARGS %d.",
" but the server was compiled with FUNC_MAX_ARGS %d.",
ControlFile->funcMaxArgs, FUNC_MAX_ARGS),
errhint("It looks like you need to recompile or initdb.")));
errhint("It looks like you need to recompile or initdb.")));

#ifdef HAVE_INT64_TIMESTAMP
if (ControlFile->enableIntTimes != TRUE)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized without HAVE_INT64_TIMESTAMP"
" but the server was compiled with HAVE_INT64_TIMESTAMP."),
errhint("It looks like you need to recompile or initdb.")));
" but the server was compiled with HAVE_INT64_TIMESTAMP."),
errhint("It looks like you need to recompile or initdb.")));
#else
if (ControlFile->enableIntTimes != FALSE)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with HAVE_INT64_TIMESTAMP"
" but the server was compiled without HAVE_INT64_TIMESTAMP."),
errhint("It looks like you need to recompile or initdb.")));
" but the server was compiled without HAVE_INT64_TIMESTAMP."),
errhint("It looks like you need to recompile or initdb.")));
#endif

if (ControlFile->localeBuflen != LOCALE_NAME_BUFLEN)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with LOCALE_NAME_BUFLEN %d,"
" but the server was compiled with LOCALE_NAME_BUFLEN %d.",
" but the server was compiled with LOCALE_NAME_BUFLEN %d.",
ControlFile->localeBuflen, LOCALE_NAME_BUFLEN),
errhint("It looks like you need to recompile or initdb.")));
errhint("It looks like you need to recompile or initdb.")));
if (setlocale(LC_COLLATE, ControlFile->lc_collate) == NULL)
ereport(FATAL,
(errmsg("database files are incompatible with operating system"),
errdetail("The database cluster was initialized with LC_COLLATE \"%s\","
" which is not recognized by setlocale().",
ControlFile->lc_collate),
errhint("It looks like you need to initdb or install locale support.")));
(errmsg("database files are incompatible with operating system"),
errdetail("The database cluster was initialized with LC_COLLATE \"%s\","
" which is not recognized by setlocale().",
ControlFile->lc_collate),
errhint("It looks like you need to initdb or install locale support.")));
if (setlocale(LC_CTYPE, ControlFile->lc_ctype) == NULL)
ereport(FATAL,
(errmsg("database files are incompatible with operating system"),
errdetail("The database cluster was initialized with LC_CTYPE \"%s\","
" which is not recognized by setlocale().",
ControlFile->lc_ctype),
errhint("It looks like you need to initdb or install locale support.")));
(errmsg("database files are incompatible with operating system"),
errdetail("The database cluster was initialized with LC_CTYPE \"%s\","
" which is not recognized by setlocale().",
ControlFile->lc_ctype),
errhint("It looks like you need to initdb or install locale support.")));

/* Make the fixed locale settings visible as GUC variables, too */
SetConfigOption("lc_collate", ControlFile->lc_collate,
@ -2602,10 +2602,10 @@ StartupXLOG(void)
|
||||
str_time(ControlFile->time))));
|
||||
else if (ControlFile->state == DB_IN_RECOVERY)
|
||||
ereport(LOG,
|
||||
(errmsg("database system was interrupted while in recovery at %s",
|
||||
str_time(ControlFile->time)),
|
||||
errhint("This probably means that some data is corrupted and"
|
||||
" you will have to use the last backup for recovery.")));
|
||||
(errmsg("database system was interrupted while in recovery at %s",
|
||||
str_time(ControlFile->time)),
|
||||
errhint("This probably means that some data is corrupted and"
|
||||
" you will have to use the last backup for recovery.")));
|
||||
else if (ControlFile->state == DB_IN_PRODUCTION)
|
||||
ereport(LOG,
|
||||
(errmsg("database system was interrupted at %s",
|
||||
@ -2637,12 +2637,12 @@ StartupXLOG(void)
|
||||
checkPointLoc = ControlFile->prevCheckPoint;
|
||||
ereport(LOG,
|
||||
(errmsg("using previous checkpoint record at %X/%X",
|
||||
checkPointLoc.xlogid, checkPointLoc.xrecoff)));
|
||||
checkPointLoc.xlogid, checkPointLoc.xrecoff)));
|
||||
InRecovery = true; /* force recovery even if SHUTDOWNED */
|
||||
}
|
||||
else
|
||||
ereport(PANIC,
|
||||
(errmsg("could not locate a valid checkpoint record")));
|
||||
(errmsg("could not locate a valid checkpoint record")));
|
||||
}
|
||||
LastRec = RecPtr = checkPointLoc;
|
||||
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
|
||||
@ -2665,11 +2665,12 @@ StartupXLOG(void)
|
||||
ShmemVariableCache->oidCount = 0;
|
||||
|
||||
/*
|
||||
* If it was a shutdown checkpoint, then any following WAL entries were
|
||||
* created under the next StartUpID; if it was a regular checkpoint then
|
||||
* any following WAL entries were created under the same StartUpID.
|
||||
* We must replay WAL entries using the same StartUpID they were created
|
||||
* under, so temporarily adopt that SUI (see also xlog_redo()).
|
||||
* If it was a shutdown checkpoint, then any following WAL entries
|
||||
* were created under the next StartUpID; if it was a regular
|
||||
* checkpoint then any following WAL entries were created under the
|
||||
* same StartUpID. We must replay WAL entries using the same StartUpID
|
||||
* they were created under, so temporarily adopt that SUI (see also
|
||||
* xlog_redo()).
|
||||
*/
|
||||
if (wasShutdown)
|
||||
ThisStartUpID = checkPoint.ThisStartUpID + 1;
@ -2690,7 +2691,7 @@ StartupXLOG(void)
{
if (wasShutdown)
ereport(PANIC,
(errmsg("invalid redo/undo record in shutdown checkpoint")));
(errmsg("invalid redo/undo record in shutdown checkpoint")));
InRecovery = true;
}
else if (ControlFile->state != DB_SHUTDOWNED)
@ -2699,7 +2700,7 @@ StartupXLOG(void)
/* REDO */
if (InRecovery)
{
int rmid;
int rmid;

ereport(LOG,
(errmsg("database system was not properly shut down; "
@ -2791,8 +2792,8 @@ StartupXLOG(void)

/*
* Tricky point here: readBuf contains the *last* block that the
* LastRec record spans, not the one it starts in. The last block
* is indeed the one we want to use.
* LastRec record spans, not the one it starts in. The last block is
* indeed the one we want to use.
*/
Assert(readOff == (XLogCtl->xlblocks[0].xrecoff - BLCKSZ) % XLogSegSize);
memcpy((char *) Insert->currpage, readBuf, BLCKSZ);
@ -2818,11 +2819,12 @@ StartupXLOG(void)
else
{
/*
* Whenever Write.LogwrtResult points to exactly the end of a page,
* Write.curridx must point to the *next* page (see XLogWrite()).
* Whenever Write.LogwrtResult points to exactly the end of a
* page, Write.curridx must point to the *next* page (see
* XLogWrite()).
*
* Note: it might seem we should do AdvanceXLInsertBuffer() here,
* but we can't since we haven't yet determined the correct StartUpID
* Note: it might seem we should do AdvanceXLInsertBuffer() here, but
* we can't since we haven't yet determined the correct StartUpID
* to put into the new page's header. The first actual attempt to
* insert a log record will advance the insert state.
*/
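
The rewrapped comment states an invariant worth spelling out: when the flushed-write position sits exactly on a page boundary, the current buffer index must already name the *next* page. A tiny sketch of just the boundary test, with an assumed 8 kB page size and invented names:

/* Sketch only: invented names; BLCKSZ is PostgreSQL's page size (8 kB by default). */
#define BLCKSZ 8192

static int
write_pos_at_page_end(unsigned long write_pos)
{
	/*
	 * True exactly when write_pos is a multiple of the page size, i.e.
	 * it points just past the end of some page, and the active buffer
	 * index must therefore refer to the page that follows.
	 */
	return (write_pos % BLCKSZ) == 0;
}
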
@ -2859,7 +2861,7 @@ StartupXLOG(void)

if (InRecovery)
{
int rmid;
int rmid;

/*
* Allow resource managers to do any required cleanup.
@ -2885,14 +2887,15 @@ StartupXLOG(void)
ThisStartUpID = ControlFile->checkPointCopy.ThisStartUpID;

/*
* Perform a new checkpoint to update our recovery activity to disk.
* Perform a new checkpoint to update our recovery activity to
* disk.
*
* Note that we write a shutdown checkpoint. This is correct since
* the records following it will use SUI one more than what is shown
* in the checkpoint's ThisStartUpID.
* the records following it will use SUI one more than what is
* shown in the checkpoint's ThisStartUpID.
*
* In case we had to use the secondary checkpoint, make sure that
* it will still be shown as the secondary checkpoint after this
* In case we had to use the secondary checkpoint, make sure that it
* will still be shown as the secondary checkpoint after this
* CreateCheckPoint operation; we don't want the broken primary
* checkpoint to become prevCheckPoint...
*/
@ -2907,10 +2910,10 @@ StartupXLOG(void)
else
{
/*
* If we are not doing recovery, then we saw a checkpoint with nothing
* after it, and we can safely use StartUpID equal to one more than
* the checkpoint's SUI. But just for paranoia's sake, check against
* pg_control too.
* If we are not doing recovery, then we saw a checkpoint with
* nothing after it, and we can safely use StartUpID equal to one
* more than the checkpoint's SUI. But just for paranoia's sake,
* check against pg_control too.
*/
ThisStartUpID = checkPoint.ThisStartUpID;
if (ThisStartUpID < ControlFile->checkPointCopy.ThisStartUpID)
@ -2923,7 +2926,8 @@ StartupXLOG(void)
PreallocXlogFiles(EndOfLog);

/*
* Advance StartUpID to one more than the highest value used previously.
* Advance StartUpID to one more than the highest value used
* previously.
*/
ThisStartUpID++;
XLogCtl->ThisStartUpID = ThisStartUpID;
@ -2973,9 +2977,9 @@ ReadCheckpointRecord(XLogRecPtr RecPtr,
if (!XRecOffIsValid(RecPtr.xrecoff))
{
ereport(LOG,
/* translator: %s is "primary" or "secondary" */
/* translator: %s is "primary" or "secondary" */
(errmsg("invalid %s checkpoint link in control file",
(whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
(whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}

@ -2984,34 +2988,34 @@ ReadCheckpointRecord(XLogRecPtr RecPtr,
if (record == NULL)
{
ereport(LOG,
/* translator: %s is "primary" or "secondary" */
/* translator: %s is "primary" or "secondary" */
(errmsg("invalid %s checkpoint record",
(whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
(whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
if (record->xl_rmid != RM_XLOG_ID)
{
ereport(LOG,
/* translator: %s is "primary" or "secondary" */
(errmsg("invalid resource manager id in %s checkpoint record",
(whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
/* translator: %s is "primary" or "secondary" */
(errmsg("invalid resource manager id in %s checkpoint record",
(whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
if (record->xl_info != XLOG_CHECKPOINT_SHUTDOWN &&
record->xl_info != XLOG_CHECKPOINT_ONLINE)
{
ereport(LOG,
/* translator: %s is "primary" or "secondary" */
/* translator: %s is "primary" or "secondary" */
(errmsg("invalid xl_info in %s checkpoint record",
(whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
(whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
if (record->xl_len != sizeof(CheckPoint))
{
ereport(LOG,
/* translator: %s is "primary" or "secondary" */
/* translator: %s is "primary" or "secondary" */
(errmsg("invalid length of %s checkpoint record",
(whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
(whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
return record;
@ -3112,10 +3116,11 @@ CreateCheckPoint(bool shutdown, bool force)
if (MyXactMadeXLogEntry)
ereport(ERROR,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
errmsg("checkpoint cannot be made inside transaction block")));
errmsg("checkpoint cannot be made inside transaction block")));

/*
* Acquire CheckpointLock to ensure only one checkpoint happens at a time.
* Acquire CheckpointLock to ensure only one checkpoint happens at a
* time.
*
* The CheckpointLock can be held for quite a while, which is not good
* because we won't respond to a cancel/die request while waiting for
@ -3149,14 +3154,15 @@ CreateCheckPoint(bool shutdown, bool force)
LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);

/*
* If this isn't a shutdown or forced checkpoint, and we have not inserted
* any XLOG records since the start of the last checkpoint, skip the
* checkpoint. The idea here is to avoid inserting duplicate checkpoints
* when the system is idle. That wastes log space, and more importantly it
* exposes us to possible loss of both current and previous checkpoint
* records if the machine crashes just as we're writing the update.
* (Perhaps it'd make even more sense to checkpoint only when the previous
* checkpoint record is in a different xlog page?)
* If this isn't a shutdown or forced checkpoint, and we have not
* inserted any XLOG records since the start of the last checkpoint,
* skip the checkpoint. The idea here is to avoid inserting duplicate
* checkpoints when the system is idle. That wastes log space, and
* more importantly it exposes us to possible loss of both current and
* previous checkpoint records if the machine crashes just as we're
* writing the update. (Perhaps it'd make even more sense to
* checkpoint only when the previous checkpoint record is in a
* different xlog page?)
*
* We have to make two tests to determine that nothing has happened since
* the start of the last checkpoint: current insertion point must
@ -3204,12 +3210,13 @@ CreateCheckPoint(bool shutdown, bool force)
* Here we update the shared RedoRecPtr for future XLogInsert calls;
* this must be done while holding the insert lock AND the info_lck.
*
* Note: if we fail to complete the checkpoint, RedoRecPtr will be
* left pointing past where it really needs to point. This is okay;
* the only consequence is that XLogInsert might back up whole buffers
* that it didn't really need to. We can't postpone advancing RedoRecPtr
* because XLogInserts that happen while we are dumping buffers must
* assume that their buffer changes are not included in the checkpoint.
* Note: if we fail to complete the checkpoint, RedoRecPtr will be left
* pointing past where it really needs to point. This is okay; the
* only consequence is that XLogInsert might back up whole buffers
* that it didn't really need to. We can't postpone advancing
* RedoRecPtr because XLogInserts that happen while we are dumping
* buffers must assume that their buffer changes are not included in
* the checkpoint.
*/
{
/* use volatile pointer to prevent code rearrangement */
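
The hunk ends on the idiom the diff preserves verbatim: shared state is touched through a volatile-qualified pointer so the compiler cannot cache or reorder the accesses around the spinlock. A minimal sketch of the pattern with invented names (shared_t, g_xlog_shared); the real fields live in XLogCtlData:

/* Sketch only: the volatile-pointer idiom, invented names throughout. */
typedef struct
{
	unsigned long redo_rec_ptr;	/* stand-in for the shared RedoRecPtr */
} shared_t;

extern shared_t *g_xlog_shared;

static void
advance_redo(unsigned long new_redo)
{
	/* use volatile pointer to prevent code rearrangement */
	volatile shared_t *shared = g_xlog_shared;

	/* spinlock acquire elided in this sketch */
	shared->redo_rec_ptr = new_redo;	/* forced to hit memory in order */
	/* spinlock release elided */
}
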
@ -3538,15 +3545,15 @@ assign_xlog_sync_method(const char *method, bool doit, bool interactive)
if (pg_fsync(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
errmsg("fsync of log file %u, segment %u failed: %m",
openLogId, openLogSeg)));
errmsg("fsync of log file %u, segment %u failed: %m",
openLogId, openLogSeg)));
if (open_sync_bit != new_sync_bit)
{
if (close(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
errmsg("close of log file %u, segment %u failed: %m",
openLogId, openLogSeg)));
errmsg("close of log file %u, segment %u failed: %m",
openLogId, openLogSeg)));
openLogFile = -1;
}
}
@ -3570,16 +3577,16 @@ issue_xlog_fsync(void)
if (pg_fsync(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
errmsg("fsync of log file %u, segment %u failed: %m",
openLogId, openLogSeg)));
errmsg("fsync of log file %u, segment %u failed: %m",
openLogId, openLogSeg)));
break;
#ifdef HAVE_FDATASYNC
case SYNC_METHOD_FDATASYNC:
if (pg_fdatasync(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
errmsg("fdatasync of log file %u, segment %u failed: %m",
openLogId, openLogSeg)));
errmsg("fdatasync of log file %u, segment %u failed: %m",
openLogId, openLogSeg)));
break;
#endif
case SYNC_METHOD_OPEN:
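
issue_xlog_fsync(), whose body this hunk rewraps, is a switch over the configured sync method; the SYNC_METHOD_OPEN arm the hunk breaks off at is empty in the original, since a log file opened with O_SYNC needs no separate flush. A compressed sketch of the dispatch, assuming POSIX fsync()/fdatasync() and collapsing the PANIC path to abort():

/* Sketch only: simplified dispatch; error handling reduced to abort(). */
#include <stdlib.h>
#include <unistd.h>

enum sync_method { SM_FSYNC, SM_FDATASYNC, SM_OPEN_SYNC };

static void
flush_log(int fd, enum sync_method method)
{
	switch (method)
	{
		case SM_FSYNC:
			if (fsync(fd) != 0)
				abort();	/* ereport(PANIC, ...) in the original */
			break;
#ifdef HAVE_FDATASYNC
		case SM_FDATASYNC:
			if (fdatasync(fd) != 0)
				abort();
			break;
#endif
		case SM_OPEN_SYNC:
			break;		/* O_SYNC writes were flushed at write time */
	}
}
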
@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.163 2003/07/27 21:49:53 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.164 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -238,7 +238,7 @@ BootstrapMain(int argc, char *argv[])
*
* If we are running under the postmaster, this is done already.
*/
if (!IsUnderPostmaster /* when exec || ExecBackend */)
if (!IsUnderPostmaster /* when exec || ExecBackend */ )
MemoryContextInit();

/*
@ -247,7 +247,7 @@ BootstrapMain(int argc, char *argv[])

/* Set defaults, to be overriden by explicit options below */
dbname = NULL;
if (!IsUnderPostmaster /* when exec || ExecBackend*/)
if (!IsUnderPostmaster /* when exec || ExecBackend */ )
{
InitializeGUCOptions();
potential_DataDir = getenv("PGDATA"); /* Null if no PGDATA
@ -285,22 +285,22 @@ BootstrapMain(int argc, char *argv[])
xlogop = atoi(optarg);
break;
case 'p':
{
/* indicates fork from postmaster */
{
/* indicates fork from postmaster */
#ifdef EXEC_BACKEND
char *p;
char *p;

sscanf(optarg, "%d,%p,", &UsedShmemSegID, &UsedShmemSegAddr);
p = strchr(optarg, ',');
if (p)
p = strchr(p+1, ',');
if (p)
dbname = strdup(p+1);
sscanf(optarg, "%d,%p,", &UsedShmemSegID, &UsedShmemSegAddr);
p = strchr(optarg, ',');
if (p)
p = strchr(p + 1, ',');
if (p)
dbname = strdup(p + 1);
#else
dbname = strdup(optarg);
dbname = strdup(optarg);
#endif
break;
}
break;
}
case 'B':
SetConfigOption("shared_buffers", optarg, PGC_POSTMASTER, PGC_S_ARGV);
break;
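
Under EXEC_BACKEND the "-p" argument packs a shared-memory segment id, an attach address, and the database name into one comma-separated string, and the hunk above splits it with sscanf() plus two strchr() calls. A standalone sketch of the same parsing with invented variable names; note it relies on the implementation-defined %p round-trip in sscanf(), just as the original does:

/* Sketch only: parse "segid,address,dbname"; invented names throughout. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	const char *arg = "42,0x7f0000001000,template1";	/* example input */
	int			seg_id = 0;
	void	   *seg_addr = NULL;
	char	   *dbname = NULL;
	const char *p;

	sscanf(arg, "%d,%p,", &seg_id, &seg_addr);

	p = strchr(arg, ',');			/* step over the segment id */
	if (p)
		p = strchr(p + 1, ',');		/* step over the attach address */
	if (p)
		dbname = strdup(p + 1);		/* the remainder is the database name */

	printf("id=%d addr=%p db=%s\n", seg_id, seg_addr, dbname ? dbname : "?");
	free(dbname);
	return 0;
}
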
@ -346,12 +346,10 @@ BootstrapMain(int argc, char *argv[])
usage();


if (IsUnderPostmaster && ExecBackend && MyProc /* ordinary backend */)
{
if (IsUnderPostmaster && ExecBackend && MyProc /* ordinary backend */ )
AttachSharedMemoryAndSemaphores();
}

if (!IsUnderPostmaster /* when exec || ExecBackend*/)

if (!IsUnderPostmaster /* when exec || ExecBackend */ )
{
if (!potential_DataDir)
{
@ -473,8 +471,8 @@ BootstrapMain(int argc, char *argv[])

/*
* In NOP mode, all we really want to do is create shared memory and
* semaphores (just to prove we can do it with the current GUC settings).
* So, quit now.
* semaphores (just to prove we can do it with the current GUC
* settings). So, quit now.
*/
if (xlogop == BS_XLOG_NOP)
proc_exit(0);

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.85 2003/08/01 00:15:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.86 2003/08/04 00:43:16 momjian Exp $
*
* NOTES
* See acl.h.
@ -97,37 +97,40 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,

if (grantee->username)
{
aclitem.ai_grantee = get_usesysid(grantee->username);
aclitem. ai_grantee = get_usesysid(grantee->username);

idtype = ACL_IDTYPE_UID;
}
else if (grantee->groupname)
{
aclitem.ai_grantee = get_grosysid(grantee->groupname);
aclitem. ai_grantee = get_grosysid(grantee->groupname);

idtype = ACL_IDTYPE_GID;
}
else
{
aclitem.ai_grantee = ACL_ID_WORLD;
aclitem. ai_grantee = ACL_ID_WORLD;

idtype = ACL_IDTYPE_WORLD;
}

/*
* Grant options can only be granted to individual users, not
* groups or public. The reason is that if a user would
* re-grant a privilege that he held through a group having a
* grant option, and later the user is removed from the group,
* the situation is impossible to clean up.
* groups or public. The reason is that if a user would re-grant
* a privilege that he held through a group having a grant option,
* and later the user is removed from the group, the situation is
* impossible to clean up.
*/
if (is_grant && idtype != ACL_IDTYPE_UID && grant_option)
ereport(ERROR,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
errmsg("grant options can only be granted to individual users")));

aclitem.ai_grantor = GetUserId();
aclitem. ai_grantor = GetUserId();

ACLITEM_SET_PRIVS_IDTYPE(aclitem,
(is_grant || !grant_option) ? privileges : ACL_NO_RIGHTS,
(grant_option || !is_grant) ? privileges : ACL_NO_RIGHTS,
(is_grant || !grant_option) ? privileges : ACL_NO_RIGHTS,
(grant_option || !is_grant) ? privileges : ACL_NO_RIGHTS,
idtype);

new_acl = aclinsert3(new_acl, &aclitem, modechg, behavior);
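
The two ternary selectors handed to ACLITEM_SET_PRIVS_IDTYPE are easy to misread after rewrapping; the table below is derived mechanically from the conditions shown in the hunk and adds no new behavior:

/*
 * privs   = (is_grant || !grant_option) ? privileges : ACL_NO_RIGHTS
 * goption = (grant_option || !is_grant) ? privileges : ACL_NO_RIGHTS
 *
 *   is_grant  grant_option | privs bits     goption bits
 *   ---------+-------------+----------------+--------------
 *   true      false        | privileges      ACL_NO_RIGHTS  (plain GRANT)
 *   true      true         | privileges      privileges     (GRANT ... WITH GRANT OPTION)
 *   false     false        | privileges      privileges     (plain REVOKE: drop both)
 *   false     true         | ACL_NO_RIGHTS   privileges     (REVOKE GRANT OPTION FOR)
 */
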
@ -247,7 +250,7 @@ ExecuteGrantStmt_Relation(GrantStmt *stmt)

new_acl = merge_acl_with_grant(old_acl, stmt->is_grant,
stmt->grantees, privileges,
stmt->grant_option, stmt->behavior);
stmt->grant_option, stmt->behavior);

/* finished building new ACL value, now insert it */
MemSet(values, 0, sizeof(values));
@ -346,7 +349,7 @@ ExecuteGrantStmt_Database(GrantStmt *stmt)

new_acl = merge_acl_with_grant(old_acl, stmt->is_grant,
stmt->grantees, privileges,
stmt->grant_option, stmt->behavior);
stmt->grant_option, stmt->behavior);

/* finished building new ACL value, now insert it */
MemSet(values, 0, sizeof(values));
@ -443,7 +446,7 @@ ExecuteGrantStmt_Function(GrantStmt *stmt)

new_acl = merge_acl_with_grant(old_acl, stmt->is_grant,
stmt->grantees, privileges,
stmt->grant_option, stmt->behavior);
stmt->grant_option, stmt->behavior);

/* finished building new ACL value, now insert it */
MemSet(values, 0, sizeof(values));
@ -543,7 +546,7 @@ ExecuteGrantStmt_Language(GrantStmt *stmt)

new_acl = merge_acl_with_grant(old_acl, stmt->is_grant,
stmt->grantees, privileges,
stmt->grant_option, stmt->behavior);
stmt->grant_option, stmt->behavior);

/* finished building new ACL value, now insert it */
MemSet(values, 0, sizeof(values));
@ -619,7 +622,7 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt)
pg_namespace_tuple = (Form_pg_namespace) GETSTRUCT(tuple);

if (stmt->is_grant
&& !pg_namespace_ownercheck(HeapTupleGetOid(tuple), GetUserId())
&& !pg_namespace_ownercheck(HeapTupleGetOid(tuple), GetUserId())
&& pg_namespace_aclcheck(HeapTupleGetOid(tuple), GetUserId(), ACL_GRANT_OPTION_FOR(privileges)) != ACLCHECK_OK)
aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_NAMESPACE,
nspname);
@ -640,7 +643,7 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt)

new_acl = merge_acl_with_grant(old_acl, stmt->is_grant,
stmt->grantees, privileges,
stmt->grant_option, stmt->behavior);
stmt->grant_option, stmt->behavior);

/* finished building new ACL value, now insert it */
MemSet(values, 0, sizeof(values));
@ -805,7 +808,7 @@ in_group(AclId uid, AclId gid)
static AclResult
aclcheck(Acl *acl, AclId userid, AclMode mode)
{
AclItem *aidat;
AclItem *aidat;
int i,
num;

@ -833,10 +836,10 @@ aclcheck(Acl *acl, AclId userid, AclMode mode)
if (aidat[i].ai_privs & mode)
return ACLCHECK_OK;
}


/*
* See if he has the permission via any group (do this in a
* separate pass to avoid expensive(?) lookups in pg_group)
* See if he has the permission via any group (do this in a separate
* pass to avoid expensive(?) lookups in pg_group)
*/
for (i = 0; i < num; i++)
if (ACLITEM_GET_IDTYPE(aidat[i]) == ACL_IDTYPE_GID
@ -856,7 +859,7 @@ aclcheck(Acl *acl, AclId userid, AclMode mode)
* supply strings that might be already quoted.
*/

static const char * const no_priv_msg[MAX_ACL_KIND] =
static const char *const no_priv_msg[MAX_ACL_KIND] =
{
/* ACL_KIND_CLASS */
gettext_noop("permission denied for relation %s"),
@ -878,7 +881,7 @@ static const char * const no_priv_msg[MAX_ACL_KIND] =
gettext_noop("permission denied for conversion %s")
};

static const char * const not_owner_msg[MAX_ACL_KIND] =
static const char *const not_owner_msg[MAX_ACL_KIND] =
{
/* ACL_KIND_CLASS */
gettext_noop("must be owner of relation %s"),
@ -972,7 +975,7 @@ pg_class_aclcheck(Oid table_oid, AclId userid, AclMode mode)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
errmsg("relation with OID %u does not exist", table_oid)));
errmsg("relation with OID %u does not exist", table_oid)));

/*
* Deny anyone permission to update a system catalog unless
@ -1124,7 +1127,7 @@ pg_proc_aclcheck(Oid proc_oid, AclId userid, AclMode mode)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
errmsg("function with OID %u does not exist", proc_oid)));
errmsg("function with OID %u does not exist", proc_oid)));

aclDatum = SysCacheGetAttr(PROCOID, tuple, Anum_pg_proc_proacl,
&isNull);
@ -1179,7 +1182,7 @@ pg_language_aclcheck(Oid lang_oid, AclId userid, AclMode mode)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("language with OID %u does not exist", lang_oid)));
errmsg("language with OID %u does not exist", lang_oid)));

aclDatum = SysCacheGetAttr(LANGOID, tuple, Anum_pg_language_lanacl,
&isNull);
@ -1288,7 +1291,7 @@ pg_class_ownercheck(Oid class_oid, AclId userid)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
errmsg("relation with OID %u does not exist", class_oid)));
errmsg("relation with OID %u does not exist", class_oid)));

owner_id = ((Form_pg_class) GETSTRUCT(tuple))->relowner;

@ -1344,7 +1347,7 @@ pg_oper_ownercheck(Oid oper_oid, AclId userid)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
errmsg("operator with OID %u does not exist", oper_oid)));
errmsg("operator with OID %u does not exist", oper_oid)));

owner_id = ((Form_pg_operator) GETSTRUCT(tuple))->oprowner;

@ -1372,7 +1375,7 @@ pg_proc_ownercheck(Oid proc_oid, AclId userid)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
errmsg("function with OID %u does not exist", proc_oid)));
errmsg("function with OID %u does not exist", proc_oid)));

owner_id = ((Form_pg_proc) GETSTRUCT(tuple))->proowner;

@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/dependency.c,v 1.28 2003/07/28 00:09:14 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/dependency.c,v 1.29 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -93,8 +93,8 @@ static Oid object_classes[MAX_OCLASS];


static void findAutoDeletableObjects(const ObjectAddress *object,
ObjectAddresses *oktodelete,
Relation depRel);
ObjectAddresses *oktodelete,
Relation depRel);
static bool recursiveDeletion(const ObjectAddress *object,
DropBehavior behavior,
int msglevel,
@ -102,11 +102,11 @@ static bool recursiveDeletion(const ObjectAddress *object,
ObjectAddresses *oktodelete,
Relation depRel);
static bool deleteDependentObjects(const ObjectAddress *object,
const char *objDescription,
DropBehavior behavior,
int msglevel,
ObjectAddresses *oktodelete,
Relation depRel);
const char *objDescription,
DropBehavior behavior,
int msglevel,
ObjectAddresses *oktodelete,
Relation depRel);
static void doDeletion(const ObjectAddress *object);
static bool find_expr_references_walker(Node *node,
find_expr_references_context *context);
@ -118,7 +118,7 @@ static void add_object_address(ObjectClasses oclass, Oid objectId, int32 subId,
static void add_exact_object_address(const ObjectAddress *object,
ObjectAddresses *addrs);
static bool object_address_present(const ObjectAddress *object,
ObjectAddresses *addrs);
ObjectAddresses *addrs);
static void term_object_addresses(ObjectAddresses *addrs);
static void init_object_classes(void);
static ObjectClasses getObjectClass(const ObjectAddress *object);
@ -158,9 +158,9 @@ performDeletion(const ObjectAddress *object,

/*
* Construct a list of objects that are reachable by AUTO or INTERNAL
* dependencies from the target object. These should be deleted silently,
* even if the actual deletion pass first reaches one of them via a
* non-auto dependency.
* dependencies from the target object. These should be deleted
* silently, even if the actual deletion pass first reaches one of
* them via a non-auto dependency.
*/
init_object_addresses(&oktodelete);

@ -170,8 +170,8 @@ performDeletion(const ObjectAddress *object,
NULL, &oktodelete, depRel))
ereport(ERROR,
(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
errmsg("cannot drop %s because other objects depend on it",
objDescription),
errmsg("cannot drop %s because other objects depend on it",
objDescription),
errhint("Use DROP ... CASCADE to drop the dependent objects too.")));

term_object_addresses(&oktodelete);
@ -184,7 +184,7 @@ performDeletion(const ObjectAddress *object,

/*
* deleteWhatDependsOn: attempt to drop everything that depends on the
* specified object, though not the object itself. Behavior is always
* specified object, though not the object itself. Behavior is always
* CASCADE.
*
* This is currently used only to clean out the contents of a schema
@ -212,9 +212,9 @@ deleteWhatDependsOn(const ObjectAddress *object,

/*
* Construct a list of objects that are reachable by AUTO or INTERNAL
* dependencies from the target object. These should be deleted silently,
* even if the actual deletion pass first reaches one of them via a
* non-auto dependency.
* dependencies from the target object. These should be deleted
* silently, even if the actual deletion pass first reaches one of
* them via a non-auto dependency.
*/
init_object_addresses(&oktodelete);

@ -266,9 +266,9 @@ findAutoDeletableObjects(const ObjectAddress *object,
ObjectAddress otherObject;

/*
* If this object is already in oktodelete, then we already visited it;
* don't do so again (this prevents infinite recursion if there's a loop
* in pg_depend). Otherwise, add it.
* If this object is already in oktodelete, then we already visited
* it; don't do so again (this prevents infinite recursion if there's
* a loop in pg_depend). Otherwise, add it.
*/
if (object_address_present(object, oktodelete))
return;
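
The comment above describes the classic guard for walking a graph that may contain cycles: record each node in a visited set before recursing into its neighbors. A toy sketch of the pattern, with a fixed-size array standing in for the real ObjectAddresses machinery:

/* Sketch only: cycle-safe depth-first walk with a toy visited set. */
#include <stdbool.h>

#define MAX_NODES 256

typedef struct
{
	int		ids[MAX_NODES];
	int		n;
} VisitedSet;

static bool
set_contains(const VisitedSet *set, int id)
{
	for (int i = 0; i < set->n; i++)
		if (set->ids[i] == id)
			return true;
	return false;
}

static void
walk(int id, VisitedSet *set, bool (*edge)(int from, int to))
{
	if (set_contains(set, id))
		return;					/* already visited: this breaks any loop */
	set->ids[set->n++] = id;

	for (int next = 0; next < MAX_NODES; next++)
		if (edge(id, next))		/* hypothetical adjacency predicate */
			walk(next, set, edge);
}
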
@ -276,8 +276,8 @@ findAutoDeletableObjects(const ObjectAddress *object,

/*
* Scan pg_depend records that link to this object, showing the things
* that depend on it. For each one that is AUTO or INTERNAL, visit the
* referencing object.
* that depend on it. For each one that is AUTO or INTERNAL, visit
* the referencing object.
*
* When dropping a whole object (subId = 0), find pg_depend records for
* its sub-objects too.
@ -319,6 +319,7 @@ findAutoDeletableObjects(const ObjectAddress *object,
findAutoDeletableObjects(&otherObject, oktodelete, depRel);
break;
case DEPENDENCY_PIN:

/*
* For a PIN dependency we just ereport immediately; there
* won't be any others to examine, and we aren't ever
@ -461,11 +462,11 @@ recursiveDeletion(const ObjectAddress *object,
char *otherObjDesc = getObjectDescription(&otherObject);

ereport(ERROR,
(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
errmsg("cannot drop %s because %s requires it",
objDescription, otherObjDesc),
errhint("You may drop %s instead.",
otherObjDesc)));
(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
errmsg("cannot drop %s because %s requires it",
objDescription, otherObjDesc),
errhint("You may drop %s instead.",
otherObjDesc)));
}

/*
@ -559,10 +560,9 @@ recursiveDeletion(const ObjectAddress *object,
/*
* Step 2: scan pg_depend records that link to this object, showing
* the things that depend on it. Recursively delete those things.
* Note it's important to delete the dependent objects
* before the referenced one, since the deletion routines might do
* things like try to update the pg_class record when deleting a check
* constraint.
* Note it's important to delete the dependent objects before the
* referenced one, since the deletion routines might do things like
* try to update the pg_class record when deleting a check constraint.
*/
if (!deleteDependentObjects(object, objDescription,
behavior, msglevel,
@ -674,11 +674,12 @@ deleteDependentObjects(const ObjectAddress *object,
switch (foundDep->deptype)
{
case DEPENDENCY_NORMAL:

/*
* Perhaps there was another dependency path that would
* have allowed silent deletion of the otherObject, had
* we only taken that path first.
* In that case, act like this link is AUTO, too.
* have allowed silent deletion of the otherObject, had we
* only taken that path first. In that case, act like this
* link is AUTO, too.
*/
if (object_address_present(&otherObject, oktodelete))
ereport(DEBUG2,
@ -872,7 +873,7 @@ recordDependencyOnExpr(const ObjectAddress *depender,
* recordDependencyOnSingleRelExpr - find expression dependencies
*
* As above, but only one relation is expected to be referenced (with
* varno = 1 and varlevelsup = 0). Pass the relation OID instead of a
* varno = 1 and varlevelsup = 0). Pass the relation OID instead of a
* range table. An additional frammish is that dependencies on that
* relation (or its component columns) will be marked with 'self_behavior',
* whereas 'behavior' is used for everything else.
@ -1001,7 +1002,7 @@ find_expr_references_walker(Node *node,
else if (rte->rtekind == RTE_JOIN)
{
/* Scan join output column to add references to join inputs */
List *save_rtables;
List *save_rtables;

/* We must make the context appropriate for join's level */
save_rtables = context->rtables;
@ -1026,7 +1027,7 @@ find_expr_references_walker(Node *node,
}
if (IsA(node, OpExpr))
{
OpExpr *opexpr = (OpExpr *) node;
OpExpr *opexpr = (OpExpr *) node;

add_object_address(OCLASS_OPERATOR, opexpr->opno, 0,
&context->addrs);
@ -1034,7 +1035,7 @@ find_expr_references_walker(Node *node,
}
if (IsA(node, DistinctExpr))
{
DistinctExpr *distinctexpr = (DistinctExpr *) node;
DistinctExpr *distinctexpr = (DistinctExpr *) node;

add_object_address(OCLASS_OPERATOR, distinctexpr->opno, 0,
&context->addrs);
@ -1042,7 +1043,7 @@ find_expr_references_walker(Node *node,
}
if (IsA(node, ScalarArrayOpExpr))
{
ScalarArrayOpExpr *opexpr = (ScalarArrayOpExpr *) node;
ScalarArrayOpExpr *opexpr = (ScalarArrayOpExpr *) node;

add_object_address(OCLASS_OPERATOR, opexpr->opno, 0,
&context->addrs);
@ -1066,7 +1067,7 @@ find_expr_references_walker(Node *node,
}
if (IsA(node, SubLink))
{
SubLink *sublink = (SubLink *) node;
SubLink *sublink = (SubLink *) node;
List *opid;

foreach(opid, sublink->operOids)
@ -1092,7 +1093,8 @@ find_expr_references_walker(Node *node,
* Add whole-relation refs for each plain relation mentioned in
* the subquery's rtable. (Note: query_tree_walker takes care of
* recursing into RTE_FUNCTION and RTE_SUBQUERY RTEs, so no need
* to do that here. But keep it from looking at join alias lists.)
* to do that here. But keep it from looking at join alias
* lists.)
*/
foreach(rtable, query->rtable)
{

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.249 2003/07/29 17:21:20 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.250 2003/08/04 00:43:16 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -418,8 +418,8 @@ CheckAttributeType(const char *attname, Oid atttypid)
* Warn user, but don't fail, if column to be created has UNKNOWN type
* (usually as a result of a 'retrieve into' - jolly)
*
* Refuse any attempt to create a pseudo-type column or one that uses
* a standalone composite type. (Eventually we should probably refuse
* Refuse any attempt to create a pseudo-type column or one that uses a
* standalone composite type. (Eventually we should probably refuse
* all references to complex types, but for now there's still some
* Berkeley-derived code that thinks it can do this...)
*/
@ -439,7 +439,7 @@ CheckAttributeType(const char *attname, Oid atttypid)
}
else if (att_typtype == 'c')
{
Oid typrelid = get_typ_typrelid(atttypid);
Oid typrelid = get_typ_typrelid(atttypid);

if (get_rel_relkind(typrelid) == RELKIND_COMPOSITE_TYPE)
ereport(ERROR,
@ -975,12 +975,13 @@ RemoveAttributeById(Oid relid, AttrNumber attnum)
attStruct->attisdropped = true;

/*
* Set the type OID to invalid. A dropped attribute's type link cannot
* be relied on (once the attribute is dropped, the type might be too).
* Fortunately we do not need the type row --- the only really essential
* information is the type's typlen and typalign, which are preserved in
* the attribute's attlen and attalign. We set atttypid to zero here
* as a means of catching code that incorrectly expects it to be valid.
* Set the type OID to invalid. A dropped attribute's type link
* cannot be relied on (once the attribute is dropped, the type might
* be too). Fortunately we do not need the type row --- the only
* really essential information is the type's typlen and typalign,
* which are preserved in the attribute's attlen and attalign. We set
* atttypid to zero here as a means of catching code that incorrectly
* expects it to be valid.
*/
attStruct->atttypid = InvalidOid;
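
The rewrapped comment spells out exactly what survives a column drop: the type OID is zeroed, while the physical facts (attlen, attalign) are kept so existing tuples remain decodable. A toy restatement of that bookkeeping, with an invented struct in place of Form_pg_attribute:

/* Sketch only: invented struct mirroring the bookkeeping described above. */
#include <stdbool.h>

typedef unsigned int Oid;
#define InvalidOid ((Oid) 0)

typedef struct
{
	Oid		atttypid;		/* type link; unreliable once dropped */
	int		attlen;			/* physical length: preserved */
	char	attalign;		/* alignment: preserved */
	bool	attisdropped;	/* column is logically gone */
} ToyAttribute;

static void
mark_dropped(ToyAttribute *att)
{
	att->attisdropped = true;
	att->atttypid = InvalidOid;	/* trap code that expects a valid type */
	/* attlen and attalign stay put: tuple layout must stay decodable */
}
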
@ -1401,7 +1402,7 @@ StoreRelCheck(Relation rel, char *ccname, char *ccbin)
' ',
' ',
' ',
InvalidOid, /* no associated index */
InvalidOid, /* no associated index */
expr, /* Tree form check constraint */
ccbin, /* Binary form check constraint */
ccsrc); /* Source form check constraint */
@ -1568,8 +1569,8 @@ AddRelationRawConstraints(Relation rel,
if (strcmp(cdef2->name, ccname) == 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("CHECK constraint \"%s\" already exists",
ccname)));
errmsg("CHECK constraint \"%s\" already exists",
ccname)));
}
}
else
@ -1639,7 +1640,7 @@ AddRelationRawConstraints(Relation rel,
if (pstate->p_hasSubLinks)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot use sub-select in CHECK constraint")));
errmsg("cannot use sub-select in CHECK constraint")));
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
@ -1750,7 +1751,7 @@ cookDefault(ParseState *pstate,
if (contain_var_clause(expr))
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
errmsg("cannot use column references in DEFAULT clause")));
errmsg("cannot use column references in DEFAULT clause")));

/*
* It can't return a set either.
@ -1773,9 +1774,9 @@ cookDefault(ParseState *pstate,
errmsg("cannot use aggregate in DEFAULT clause")));

/*
* Coerce the expression to the correct type and typmod, if given. This
* should match the parser's processing of non-defaulted expressions ---
* see updateTargetListEntry().
* Coerce the expression to the correct type and typmod, if given.
* This should match the parser's processing of non-defaulted
* expressions --- see updateTargetListEntry().
*/
if (OidIsValid(atttypid))
{
@ -1793,7 +1794,7 @@ cookDefault(ParseState *pstate,
attname,
format_type_be(atttypid),
format_type_be(type_id)),
errhint("You will need to rewrite or cast the expression.")));
errhint("You will need to rewrite or cast the expression.")));
}

return expr;
@ -1952,7 +1953,7 @@ RelationTruncateIndexes(Oid heapId)

/*
* index_build will close both the heap and index relations (but
* not give up the locks we hold on them). We're done with this
* not give up the locks we hold on them). We're done with this
* index, but we must re-open the heap rel.
*/
heapRelation = heap_open(heapId, NoLock);

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.212 2003/07/21 01:59:08 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.213 2003/08/04 00:43:16 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -65,8 +65,8 @@

/* non-export function prototypes */
static TupleDesc ConstructTupleDescriptor(Relation heapRelation,
IndexInfo *indexInfo,
Oid *classObjectId);
IndexInfo *indexInfo,
Oid *classObjectId);
static void UpdateRelationRelation(Relation indexRelation);
static void InitializeAttributeOids(Relation indexRelation,
int numatts, Oid indexoid);
@ -124,7 +124,7 @@ ConstructTupleDescriptor(Relation heapRelation,

/*
* For simple index columns, we copy the pg_attribute row from the
* parent relation and modify it as necessary. For expressions we
* parent relation and modify it as necessary. For expressions we
* have to cons up a pg_attribute row the hard way.
*/
for (i = 0; i < numatts; i++)
@ -149,7 +149,7 @@ ConstructTupleDescriptor(Relation heapRelation,
* here we are indexing on a system attribute (-1...-n)
*/
from = SystemAttributeDefinition(atnum,
heapRelation->rd_rel->relhasoids);
heapRelation->rd_rel->relhasoids);
}
else
{
@ -162,8 +162,8 @@ ConstructTupleDescriptor(Relation heapRelation,
}

/*
* now that we've determined the "from", let's copy the tuple desc
* data...
* now that we've determined the "from", let's copy the tuple
* desc data...
*/
memcpy(to, from, ATTRIBUTE_TUPLE_SIZE);

@ -185,7 +185,7 @@ ConstructTupleDescriptor(Relation heapRelation,
/* Expressional index */
Node *indexkey;

if (indexprs == NIL) /* shouldn't happen */
if (indexprs == NIL) /* shouldn't happen */
elog(ERROR, "too few entries in indexprs list");
indexkey = (Node *) lfirst(indexprs);
indexprs = lnext(indexprs);
@ -197,7 +197,8 @@ ConstructTupleDescriptor(Relation heapRelation,
sprintf(NameStr(to->attname), "pg_expression_%d", i + 1);

/*
* Lookup the expression type in pg_type for the type length etc.
* Lookup the expression type in pg_type for the type length
* etc.
*/
keyType = exprType(indexkey);
tuple = SearchSysCache(TYPEOID,
@ -534,7 +535,7 @@ index_create(Oid heapRelationId,
if (shared_relation && IsUnderPostmaster)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("shared indexes cannot be created after initdb")));
errmsg("shared indexes cannot be created after initdb")));

if (get_relname_relid(indexRelationName, namespaceId))
ereport(ERROR,
@ -668,7 +669,7 @@ index_create(Oid heapRelationId,
' ',
' ',
' ',
InvalidOid, /* no associated index */
InvalidOid, /* no associated index */
NULL, /* no check constraint */
NULL,
NULL);
@ -709,7 +710,7 @@ index_create(Oid heapRelationId,
if (indexInfo->ii_Expressions)
{
recordDependencyOnSingleRelExpr(&myself,
(Node *) indexInfo->ii_Expressions,
(Node *) indexInfo->ii_Expressions,
heapRelationId,
DEPENDENCY_NORMAL,
DEPENDENCY_AUTO);
@ -719,7 +720,7 @@ index_create(Oid heapRelationId,
if (indexInfo->ii_Predicate)
{
recordDependencyOnSingleRelExpr(&myself,
(Node *) indexInfo->ii_Predicate,
(Node *) indexInfo->ii_Predicate,
heapRelationId,
DEPENDENCY_NORMAL,
DEPENDENCY_AUTO);
@ -831,8 +832,8 @@ index_drop(Oid indexId)

/*
* We are presently too lazy to attempt to compute the new correct
* value of relhasindex (the next VACUUM will fix it if necessary).
* So there is no need to update the pg_class tuple for the owning
* value of relhasindex (the next VACUUM will fix it if necessary). So
* there is no need to update the pg_class tuple for the owning
* relation. But we must send out a shared-cache-inval notice on the
* owning relation to ensure other backends update their relcache
* lists of indexes.
@ -958,7 +959,7 @@ FormIndexDatum(IndexInfo *indexInfo,
if (indexprs == NIL)
elog(ERROR, "wrong number of index expressions");
iDatum = ExecEvalExprSwitchContext((ExprState *) lfirst(indexprs),
GetPerTupleExprContext(estate),
GetPerTupleExprContext(estate),
&isNull,
NULL);
indexprs = lnext(indexprs);
@ -1160,7 +1161,7 @@ setNewRelfilenode(Relation relation)
if (!in_place_upd)
{
tuple = SearchSysCacheCopy(RELOID,
ObjectIdGetDatum(RelationGetRelid(relation)),
ObjectIdGetDatum(RelationGetRelid(relation)),
0, 0, 0);
}
else
@ -1170,7 +1171,7 @@ setNewRelfilenode(Relation relation)
ScanKeyEntryInitialize(&key[0], 0,
ObjectIdAttributeNumber,
F_OIDEQ,
ObjectIdGetDatum(RelationGetRelid(relation)));
ObjectIdGetDatum(RelationGetRelid(relation)));

pg_class_scan = heap_beginscan(pg_class, SnapshotNow, 1, key);
tuple = heap_getnext(pg_class_scan, ForwardScanDirection);
@ -1325,9 +1326,9 @@ UpdateStats(Oid relid, double reltuples)
}

/*
* Update statistics in pg_class, if they changed. (Avoiding an
* unnecessary update is not just a tiny performance improvement;
* it also reduces the window wherein concurrent CREATE INDEX commands
* Update statistics in pg_class, if they changed. (Avoiding an
* unnecessary update is not just a tiny performance improvement; it
* also reduces the window wherein concurrent CREATE INDEX commands
* may conflict.)
*/
rd_rel = (Form_pg_class) GETSTRUCT(tuple);
@ -1338,8 +1339,9 @@ UpdateStats(Oid relid, double reltuples)
if (in_place_upd)
{
/*
* At bootstrap time, we don't need to worry about concurrency or
* visibility of changes, so we cheat. Also cheat if REINDEX.
* At bootstrap time, we don't need to worry about concurrency
* or visibility of changes, so we cheat. Also cheat if
* REINDEX.
*/
LockBuffer(pg_class_scan->rs_cbuf, BUFFER_LOCK_EXCLUSIVE);
rd_rel->relpages = (int32) relpages;
@ -1367,7 +1369,7 @@ UpdateStats(Oid relid, double reltuples)
/*
* We shouldn't have to do this, but we do... Modify the reldesc in
* place with the new values so that the cache contains the latest
* copy. (XXX is this really still necessary? The relcache will get
* copy. (XXX is this really still necessary? The relcache will get
* fixed at next CommandCounterIncrement, so why bother here?)
*/
whichRel->rd_rel->relpages = (int32) relpages;
@ -1454,8 +1456,8 @@ IndexBuildHeapScan(Relation heapRelation,
heapDescriptor = RelationGetDescr(heapRelation);

/*
* Need an EState for evaluation of index expressions
* and partial-index predicates.
* Need an EState for evaluation of index expressions and
* partial-index predicates.
*/
estate = CreateExecutorState();
econtext = GetPerTupleExprContext(estate);
@ -1463,7 +1465,8 @@ IndexBuildHeapScan(Relation heapRelation,
/*
* If this is a predicate (partial) index, we will need to evaluate
* the predicate using ExecQual, which requires the current tuple to
* be in a slot of a TupleTable. Likewise if there are any expressions.
* be in a slot of a TupleTable. Likewise if there are any
* expressions.
*/
if (indexInfo->ii_Predicate != NIL || indexInfo->ii_Expressions != NIL)
{
@ -1741,15 +1744,15 @@ reindex_index(Oid indexId, bool force, bool inplace)
* it's a nailed-in-cache index, we must do inplace processing because
* the relcache can't cope with changing its relfilenode.
*
* In either of these cases, we are definitely processing a system
* index, so we'd better be ignoring system indexes.
* In either of these cases, we are definitely processing a system index,
* so we'd better be ignoring system indexes.
*/
if (iRel->rd_rel->relisshared)
{
if (!IsIgnoringSystemIndexes())
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("the target relation %u is shared", indexId)));
errmsg("the target relation %u is shared", indexId)));
inplace = true;
}
if (iRel->rd_isnailed)
@ -1757,7 +1760,7 @@ reindex_index(Oid indexId, bool force, bool inplace)
if (!IsIgnoringSystemIndexes())
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("the target relation %u is nailed", indexId)));
errmsg("the target relation %u is nailed", indexId)));
inplace = true;
}

@ -13,7 +13,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/namespace.c,v 1.55 2003/08/01 00:15:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/namespace.c,v 1.56 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -164,7 +164,7 @@ RangeVarGetRelid(const RangeVar *relation, bool failOK)
if (strcmp(relation->catalogname, get_database_name(MyDatabaseId)) != 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cross-database references are not implemented")));
errmsg("cross-database references are not implemented")));
}

if (relation->schemaname)
@ -217,7 +217,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation)
if (strcmp(newRelation->catalogname, get_database_name(MyDatabaseId)) != 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cross-database references are not implemented")));
errmsg("cross-database references are not implemented")));
}

if (newRelation->istemp)
@ -226,7 +226,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation)
if (newRelation->schemaname)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
errmsg("TEMP tables may not specify a schema name")));
errmsg("TEMP tables may not specify a schema name")));
/* Initialize temp namespace if first time through */
if (!OidIsValid(myTempNamespace))
InitTempTableNamespace();
@ -1057,7 +1057,7 @@ OpclassIsVisible(Oid opcid)
Oid
ConversionGetConid(const char *conname)
{
Oid conid;
Oid conid;
List *lptr;

recomputeNamespacePath();
@ -1115,11 +1115,11 @@ ConversionIsVisible(Oid conid)
/*
* If it is in the path, it might still not be visible; it could
* be hidden by another conversion of the same name earlier in the
* path. So we must do a slow check to see if this conversion would
* be found by ConversionGetConid.
* path. So we must do a slow check to see if this conversion
* would be found by ConversionGetConid.
*/
char *conname = NameStr(conform->conname);


visible = (ConversionGetConid(conname) == conid);
}
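
ConversionIsVisible, in the hunk above, answers "would an unqualified lookup find this object?" by re-running the forward lookup and comparing OIDs; anything earlier in the search path with the same name shadows it. A schematic restatement with invented lookup helpers:

/* Sketch only: visibility as a lookup round-trip; both helpers are invented. */
typedef unsigned int Oid;

extern Oid	lookup_by_name(const char *name);	/* search-path lookup */
extern const char *name_of(Oid oid);			/* catalog fetch */

static int
is_visible(Oid oid)
{
	/*
	 * Visible iff an unqualified search for the object's own name comes
	 * back with the same OID.
	 */
	return lookup_by_name(name_of(oid)) == oid;
}
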
|
||||
|
||||
@ -1164,13 +1164,13 @@ DeconstructQualifiedName(List *names,
|
||||
if (strcmp(catalogname, get_database_name(MyDatabaseId)) != 0)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("cross-database references are not implemented")));
|
||||
errmsg("cross-database references are not implemented")));
|
||||
break;
|
||||
default:
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_SYNTAX_ERROR),
|
||||
errmsg("improper qualified name (too many dotted names): %s",
|
||||
NameListToString(names))));
|
||||
errmsg("improper qualified name (too many dotted names): %s",
|
||||
NameListToString(names))));
|
||||
break;
|
||||
}
|
||||
|
||||
@ -1281,8 +1281,8 @@ makeRangeVarFromNameList(List *names)
|
||||
default:
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_SYNTAX_ERROR),
|
||||
errmsg("improper relation name (too many dotted names): %s",
|
||||
NameListToString(names))));
|
||||
errmsg("improper relation name (too many dotted names): %s",
|
||||
NameListToString(names))));
|
||||
break;
|
||||
}
|
||||
|
||||
@ -1720,8 +1720,8 @@ RemoveTempRelations(Oid tempNamespaceId)
|
||||
|
||||
/*
|
||||
* We want to get rid of everything in the target namespace, but not
|
||||
* the namespace itself (deleting it only to recreate it later would be
|
||||
* a waste of cycles). We do this by finding everything that has a
|
||||
* the namespace itself (deleting it only to recreate it later would
|
||||
* be a waste of cycles). We do this by finding everything that has a
|
||||
* dependency on the namespace.
|
||||
*/
|
||||
object.classId = get_system_catalog_relid(NamespaceRelationName);
|
||||
@ -1797,7 +1797,7 @@ assign_search_path(const char *newval, bool doit, bool interactive)
|
||||
0, 0, 0))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_UNDEFINED_SCHEMA),
|
||||
errmsg("schema \"%s\" does not exist", curname)));
|
||||
errmsg("schema \"%s\" does not exist", curname)));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.61 2003/07/21 01:59:10 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.62 2003/08/04 00:43:16 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -29,8 +29,8 @@
|
||||
#include "utils/syscache.h"
|
||||
|
||||
|
||||
static Oid lookup_agg_function(List *fnName, int nargs, Oid *input_types,
|
||||
Oid *rettype);
|
||||
static Oid lookup_agg_function(List *fnName, int nargs, Oid *input_types,
|
||||
Oid *rettype);
|
||||
|
||||
|
||||
/*
|
||||
@ -79,7 +79,7 @@ AggregateCreate(const char *aggName,
|
||||
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
|
||||
errmsg("cannot determine transition datatype"),
|
||||
errdetail("An aggregate using ANYARRAY or ANYELEMENT as "
|
||||
"trans type must have one of them as its base type.")));
|
||||
"trans type must have one of them as its base type.")));
|
||||
|
||||
/* handle transfn */
|
||||
MemSet(fnArgs, 0, FUNC_MAX_ARGS * sizeof(Oid));
|
||||
@ -99,8 +99,8 @@ AggregateCreate(const char *aggName,
* enforce_generic_type_consistency, if transtype isn't polymorphic)
* must exactly match declared transtype.
*
* In the non-polymorphic-transtype case, it might be okay to allow
* a rettype that's binary-coercible to transtype, but I'm not quite
* In the non-polymorphic-transtype case, it might be okay to allow a
* rettype that's binary-coercible to transtype, but I'm not quite
* convinced that it's either safe or useful. When transtype is
* polymorphic we *must* demand exact equality.
*/
@ -151,9 +151,9 @@ AggregateCreate(const char *aggName,
Assert(OidIsValid(finaltype));

/*
* If finaltype (i.e. aggregate return type) is polymorphic,
* basetype must be polymorphic also, else parser will fail to deduce
* result type. (Note: given the previous test on transtype and basetype,
* If finaltype (i.e. aggregate return type) is polymorphic, basetype
* must be polymorphic also, else parser will fail to deduce result
* type. (Note: given the previous test on transtype and basetype,
* this cannot happen, unless someone has snuck a finalfn definition
* into the catalogs that itself violates the rule against polymorphic
* result with no polymorphic input.)
@ -163,8 +163,8 @@ AggregateCreate(const char *aggName,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("cannot determine result datatype"),
errdetail("An aggregate returning ANYARRAY or ANYELEMENT "
"must have one of them as its base type.")));
errdetail("An aggregate returning ANYARRAY or ANYELEMENT "
"must have one of them as its base type.")));

/*
* Everything looks okay. Try to create the pg_proc entry for the
@ -278,21 +278,21 @@ lookup_agg_function(List *fnName,
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
errmsg("function %s does not exist",
func_signature_string(fnName, nargs, input_types))));
func_signature_string(fnName, nargs, input_types))));
if (retset)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function %s returns a set",
func_signature_string(fnName, nargs, input_types))));
func_signature_string(fnName, nargs, input_types))));

/*
* If the given type(s) are all polymorphic, there's nothing we
* can check. Otherwise, enforce consistency, and possibly refine
* the result type.
* If the given type(s) are all polymorphic, there's nothing we can
* check. Otherwise, enforce consistency, and possibly refine the
* result type.
*/
if ((input_types[0] == ANYARRAYOID || input_types[0] == ANYELEMENTOID) &&
(nargs == 1 ||
(input_types[1] == ANYARRAYOID || input_types[1] == ANYELEMENTOID)))
(input_types[1] == ANYARRAYOID || input_types[1] == ANYELEMENTOID)))
{
/* nothing to check here */
}
@ -305,8 +305,8 @@ lookup_agg_function(List *fnName,
}

/*
* func_get_detail will find functions requiring run-time argument type
* coercion, but nodeAgg.c isn't prepared to deal with that
* func_get_detail will find functions requiring run-time argument
* type coercion, but nodeAgg.c isn't prepared to deal with that
*/
if (true_oid_array[0] != ANYARRAYOID &&
true_oid_array[0] != ANYELEMENTOID &&
@ -314,7 +314,7 @@ lookup_agg_function(List *fnName,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function %s requires run-time type coercion",
func_signature_string(fnName, nargs, true_oid_array))));
func_signature_string(fnName, nargs, true_oid_array))));

if (nargs == 2 &&
true_oid_array[1] != ANYARRAYOID &&
@ -323,7 +323,7 @@ lookup_agg_function(List *fnName,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function %s requires run-time type coercion",
func_signature_string(fnName, nargs, true_oid_array))));

return fnOid;
}
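The hunks above all enforce one rule worth spelling out: a polymorphic result type (ANYARRAY or ANYELEMENT) is only deducible when at least one input is polymorphic too. A minimal standalone C sketch of that check follows; it is not PostgreSQL source, and the OID constants, while matching the usual pg_type assignments, should be read as illustrative stand-ins.

#include <stdbool.h>

typedef unsigned int Oid;

#define ANYARRAYOID   2277
#define ANYELEMENTOID 2283

static bool
is_polymorphic(Oid t)
{
	return t == ANYARRAYOID || t == ANYELEMENTOID;
}

/* Returns true when a declared signature is acceptable. */
static bool
polymorphic_result_ok(Oid rettype, const Oid *argtypes, int nargs)
{
	if (!is_polymorphic(rettype))
		return true;			/* nothing to prove */
	for (int i = 0; i < nargs; i++)
		if (is_polymorphic(argtypes[i]))
			return true;		/* result is deducible from this input */
	return false;				/* would raise ERRCODE_DATATYPE_MISMATCH */
}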
|
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_constraint.c,v 1.14 2003/07/21 01:59:10 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_constraint.c,v 1.15 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -195,7 +195,7 @@ CreateConstraintEntry(const char *constraintName,
/*
* Register auto dependency from constraint to owning domain
*/
ObjectAddress domobject;
ObjectAddress domobject;

domobject.classId = RelOid_pg_type;
domobject.objectId = domainId;
@ -234,8 +234,8 @@ CreateConstraintEntry(const char *constraintName,
if (OidIsValid(indexRelId))
{
/*
* Register normal dependency on the unique index that supports
* a foreign-key constraint.
* Register normal dependency on the unique index that supports a
* foreign-key constraint.
*/
ObjectAddress relobject;

@ -438,8 +438,8 @@ RemoveConstraintById(Oid conId)
Relation rel;

/*
* If the constraint is for a relation, open and exclusive-lock the
* relation it's for.
* If the constraint is for a relation, open and exclusive-lock
* the relation it's for.
*/
rel = heap_open(con->conrelid, AccessExclusiveLock);

@ -463,7 +463,7 @@ RemoveConstraintById(Oid conId)
con->conrelid);
classForm = (Form_pg_class) GETSTRUCT(relTup);

if (classForm->relchecks == 0) /* should not happen */
if (classForm->relchecks == 0) /* should not happen */
elog(ERROR, "relation \"%s\" has relchecks = 0",
RelationGetRelationName(rel));
classForm->relchecks--;
@ -483,16 +483,15 @@ RemoveConstraintById(Oid conId)
else if (OidIsValid(con->contypid))
{
/*
* XXX for now, do nothing special when dropping a domain constraint
* XXX for now, do nothing special when dropping a domain
* constraint
*
* Probably there should be some form of locking on the domain type,
* but we have no such concept at the moment.
*/
}
else
{
elog(ERROR, "constraint %u is not of a known type", conId);
}

/* Fry the constraint itself */
simple_heap_delete(conDesc, &tup->t_self);
|
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_conversion.c,v 1.13 2003/08/01 00:15:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_conversion.c,v 1.14 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -76,9 +76,9 @@ ConversionCreate(const char *conname, Oid connamespace,
contoencoding))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("default conversion for %s to %s already exists",
pg_encoding_to_char(conforencoding),
pg_encoding_to_char(contoencoding))));
errmsg("default conversion for %s to %s already exists",
pg_encoding_to_char(conforencoding),
pg_encoding_to_char(contoencoding))));
}

/* open pg_conversion */
@ -147,7 +147,7 @@ ConversionDrop(Oid conversionOid, DropBehavior behavior)
if (!superuser() &&
((Form_pg_conversion) GETSTRUCT(tuple))->conowner != GetUserId())
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CONVERSION,
NameStr(((Form_pg_conversion) GETSTRUCT(tuple))->conname));
NameStr(((Form_pg_conversion) GETSTRUCT(tuple))->conname));

ReleaseSysCache(tuple);

|
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.81 2003/08/01 00:15:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.82 2003/08/04 00:43:16 momjian Exp $
*
* NOTES
* these routines moved here from commands/define.c and somewhat cleaned up.
@ -409,7 +409,7 @@ OperatorCreate(const char *operatorName,
if (!OidIsValid(leftTypeId) && !OidIsValid(rightTypeId))
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("at least one of leftarg or rightarg must be specified")));
errmsg("at least one of leftarg or rightarg must be specified")));

if (!(OidIsValid(leftTypeId) && OidIsValid(rightTypeId)))
{
@ -417,11 +417,11 @@ OperatorCreate(const char *operatorName,
if (commutatorName)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("only binary operators can have commutators")));
errmsg("only binary operators can have commutators")));
if (joinName)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("only binary operators can have join selectivity")));
errmsg("only binary operators can have join selectivity")));
if (canHash)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
|
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.102 2003/08/01 00:15:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.103 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -94,7 +94,7 @@ ProcedureCreate(const char *procedureName,
*/
if (returnType == ANYARRAYOID || returnType == ANYELEMENTOID)
{
bool genericParam = false;
bool genericParam = false;

for (i = 0; i < parameterCount; i++)
{
@ -231,7 +231,7 @@ ProcedureCreate(const char *procedureName,
returnsSet != oldproc->proretset)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot change return type of existing function"),
errmsg("cannot change return type of existing function"),
errhint("Use DROP FUNCTION first.")));

/* Can't change aggregate status, either */
@ -339,8 +339,8 @@ ProcedureCreate(const char *procedureName,
*
* This is normally applied during function definition, but in the case
* of a function with polymorphic arguments, we instead apply it during
* function execution startup. The rettype is then the actual resolved
* output type of the function, rather than the declared type. (Therefore,
* function execution startup. The rettype is then the actual resolved
* output type of the function, rather than the declared type. (Therefore,
* we should never see ANYARRAY or ANYELEMENT as rettype.)
*/
void
@ -366,7 +366,7 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList)
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
errdetail("Function's final statement must be a SELECT.")));
errdetail("Function's final statement must be a SELECT.")));
return;
}

@ -395,9 +395,9 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList)
if (cmd != CMD_SELECT)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
errdetail("Function's final statement must be a SELECT.")));
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
errdetail("Function's final statement must be a SELECT.")));

/*
* Count the non-junk entries in the result targetlist.
@ -421,7 +421,7 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList)
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
errdetail("Final SELECT must return exactly one column.")));
errdetail("Final SELECT must return exactly one column.")));

restype = ((TargetEntry *) lfirst(tlist))->resdom->restype;
if (!IsBinaryCoercible(restype, rettype))
@ -481,7 +481,7 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList)
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
errdetail("Final SELECT returns too many columns.")));
errdetail("Final SELECT returns too many columns.")));
attr = reln->rd_att->attrs[colindex - 1];
} while (attr->attisdropped);
rellogcols++;
@ -538,8 +538,8 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList)
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type %s is not supported for SQL functions",
format_type_be(rettype))));
errmsg("return type %s is not supported for SQL functions",
format_type_be(rettype))));
}


@ -684,8 +684,8 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("SQL functions cannot have arguments of type %s",
format_type_be(proc->proargtypes[i]))));
errmsg("SQL functions cannot have arguments of type %s",
format_type_be(proc->proargtypes[i]))));
}
}

@ -696,13 +696,13 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
prosrc = DatumGetCString(DirectFunctionCall1(textout, tmp));

/*
* We can't do full prechecking of the function definition if there are
* any polymorphic input types, because actual datatypes of expression
* results will be unresolvable. The check will be done at runtime
* instead.
* We can't do full prechecking of the function definition if there
* are any polymorphic input types, because actual datatypes of
* expression results will be unresolvable. The check will be done at
* runtime instead.
*
* We can run the text through the raw parser though; this will at
* least catch silly syntactic errors.
* We can run the text through the raw parser though; this will at least
* catch silly syntactic errors.
*/
if (!haspolyarg)
{
@ -712,9 +712,7 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
check_sql_fn_retval(proc->prorettype, functyptype, querytree_list);
}
else
{
querytree_list = pg_parse_query(prosrc);
}

ReleaseSysCache(tuple);

|
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.88 2003/07/21 01:59:11 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.89 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -359,7 +359,8 @@ TypeCreate(const char *typeName,
void
GenerateTypeDependencies(Oid typeNamespace,
Oid typeObjectId,
Oid relationOid, /* only for 'c'atalog types */
Oid relationOid, /* only for 'c'atalog
* types */
char relationKind, /* ditto */
Oid inputProcedure,
Oid outputProcedure,
@ -426,13 +427,13 @@ GenerateTypeDependencies(Oid typeNamespace,

/*
* If the type is a rowtype for a relation, mark it as internally
* dependent on the relation, *unless* it is a stand-alone
* composite type relation. For the latter case, we have to
* reverse the dependency.
* dependent on the relation, *unless* it is a stand-alone composite
* type relation. For the latter case, we have to reverse the
* dependency.
*
* In the former case, this allows the type to be auto-dropped when
* the relation is, and not otherwise. And in the latter, of
* course we get the opposite effect.
* In the former case, this allows the type to be auto-dropped when the
* relation is, and not otherwise. And in the latter, of course we get
* the opposite effect.
*/
if (OidIsValid(relationOid))
{
@ -447,11 +448,11 @@ GenerateTypeDependencies(Oid typeNamespace,
}

/*
* If the type is an array type, mark it auto-dependent on the
* base type. (This is a compromise between the typical case
* where the array type is automatically generated and the case
* where it is manually created: we'd prefer INTERNAL for the
* former case and NORMAL for the latter.)
* If the type is an array type, mark it auto-dependent on the base
* type. (This is a compromise between the typical case where the
* array type is automatically generated and the case where it is
* manually created: we'd prefer INTERNAL for the former case and
* NORMAL for the latter.)
*/
if (OidIsValid(elementType))
{
|
@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/aggregatecmds.c,v 1.12 2003/08/01 00:15:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/aggregatecmds.c,v 1.13 2003/08/04 00:43:16 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@ -256,16 +256,16 @@ RenameAggregate(List *name, TypeName *basetype, const char *newname)
if (basetypeOid == ANYOID)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_FUNCTION),
errmsg("function %s(*) already exists in schema \"%s\"",
newname,
get_namespace_name(namespaceOid))));
errmsg("function %s(*) already exists in schema \"%s\"",
newname,
get_namespace_name(namespaceOid))));
else
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_FUNCTION),
errmsg("function %s already exists in schema \"%s\"",
funcname_signature_string(newname,
procForm->pronargs,
procForm->proargtypes),
procForm->proargtypes),
get_namespace_name(namespaceOid))));
}

|
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/alter.c,v 1.4 2003/08/01 00:15:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/alter.c,v 1.5 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -79,52 +79,52 @@ ExecRenameStmt(RenameStmt *stmt)
case OBJECT_TABLE:
case OBJECT_COLUMN:
case OBJECT_TRIGGER:
{
Oid relid;

CheckRelationOwnership(stmt->relation, true);

relid = RangeVarGetRelid(stmt->relation, false);

switch (stmt->renameType)
{
case OBJECT_TABLE:
Oid relid;

CheckRelationOwnership(stmt->relation, true);

relid = RangeVarGetRelid(stmt->relation, false);

switch (stmt->renameType)
{
/*
* RENAME TABLE requires that we (still) hold
* CREATE rights on the containing namespace, as
* well as ownership of the table.
*/
Oid namespaceId = get_rel_namespace(relid);
AclResult aclresult;
case OBJECT_TABLE:
{
/*
* RENAME TABLE requires that we (still) hold
* CREATE rights on the containing namespace,
* as well as ownership of the table.
*/
Oid namespaceId = get_rel_namespace(relid);
AclResult aclresult;

aclresult = pg_namespace_aclcheck(namespaceId,
GetUserId(),
ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
get_namespace_name(namespaceId));
aclresult = pg_namespace_aclcheck(namespaceId,
GetUserId(),
ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
get_namespace_name(namespaceId));

renamerel(relid, stmt->newname);
break;
}
case OBJECT_COLUMN:
renameatt(relid,
stmt->subname, /* old att name */
stmt->newname, /* new att name */
renamerel(relid, stmt->newname);
break;
}
case OBJECT_COLUMN:
renameatt(relid,
stmt->subname, /* old att name */
stmt->newname, /* new att name */
interpretInhOption(stmt->relation->inhOpt), /* recursive? */
false); /* recursing already? */
break;
case OBJECT_TRIGGER:
renametrig(relid,
stmt->subname, /* old att name */
stmt->newname); /* new att name */
break;
default:
/*can't happen*/;
false); /* recursing already? */
break;
case OBJECT_TRIGGER:
renametrig(relid,
stmt->subname, /* old att name */
stmt->newname); /* new att name */
break;
default:
/* can't happen */ ;
}
break;
}
break;
}

default:
elog(ERROR, "unrecognized rename stmt type: %d",
|
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.56 2003/07/20 21:56:32 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.57 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -220,9 +220,9 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)

/*
* Silently ignore tables that are temp tables of other backends ---
* trying to analyze these is rather pointless, since their
* contents are probably not up-to-date on disk. (We don't throw a
* warning here; it would just lead to chatter during a database-wide
* trying to analyze these is rather pointless, since their contents
* are probably not up-to-date on disk. (We don't throw a warning
* here; it would just lead to chatter during a database-wide
* ANALYZE.)
*/
if (isOtherTempNamespace(RelationGetNamespace(onerel)))
|
@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.96 2003/07/20 21:56:32 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.97 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -603,10 +603,10 @@ Async_NotifyHandler(SIGNAL_ARGS)
bool save_ImmediateInterruptOK = ImmediateInterruptOK;

/*
* We may be called while ImmediateInterruptOK is true; turn it off
* while messing with the NOTIFY state. (We would have to save
* and restore it anyway, because PGSemaphore operations inside
* ProcessIncomingNotify() might reset it.)
* We may be called while ImmediateInterruptOK is true; turn it
* off while messing with the NOTIFY state. (We would have to
* save and restore it anyway, because PGSemaphore operations
* inside ProcessIncomingNotify() might reset it.)
*/
ImmediateInterruptOK = false;

@ -639,7 +639,8 @@ Async_NotifyHandler(SIGNAL_ARGS)
}

/*
* Restore ImmediateInterruptOK, and check for interrupts if needed.
* Restore ImmediateInterruptOK, and check for interrupts if
* needed.
*/
ImmediateInterruptOK = save_ImmediateInterruptOK;
if (save_ImmediateInterruptOK)
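The rewrapped comment above describes a pattern worth a standalone sketch: a signal handler that must disable an interrupts-OK flag has to save and restore the caller's value, because the work it invokes may itself change the flag. The helpers below are hypothetical stubs, not the real async.c code.

#include <signal.h>

static volatile sig_atomic_t ImmediateInterruptOK = 1;

static void
process_incoming_notify(void)
{
	/* the real work; it may itself set or clear the flag */
}

static void
notify_handler(int signo)
{
	sig_atomic_t save_ok = ImmediateInterruptOK;

	(void) signo;
	ImmediateInterruptOK = 0;	/* no interrupts while touching NOTIFY state */
	process_incoming_notify();
	ImmediateInterruptOK = save_ok;	/* restore, whatever the work did */
}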
|
@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.112 2003/08/01 00:15:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.113 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -58,12 +58,12 @@ typedef struct
*/
typedef struct
{
Oid tableOid;
Oid indexOid;
} RelToCluster;
Oid tableOid;
Oid indexOid;
} RelToCluster;


static void cluster_rel(RelToCluster *rv, bool recheck);
static void cluster_rel(RelToCluster * rv, bool recheck);
static Oid make_new_heap(Oid OIDOldHeap, const char *NewName);
static void copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex);
static List *get_indexattr_list(Relation OldHeap, Oid OldIndex);
@ -74,7 +74,7 @@ static List *get_tables_to_cluster(MemoryContext cluster_context);


/*---------------------------------------------------------------------------
* This cluster code allows for clustering multiple tables at once. Because
* This cluster code allows for clustering multiple tables at once. Because
* of this, we cannot just run everything on a single transaction, or we
* would be forced to acquire exclusive locks on all the tables being
* clustered, simultaneously --- very likely leading to deadlock.
@ -82,17 +82,17 @@ static List *get_tables_to_cluster(MemoryContext cluster_context);
* To solve this we follow a similar strategy to VACUUM code,
* clustering each relation in a separate transaction. For this to work,
* we need to:
* - provide a separate memory context so that we can pass information in
* a way that survives across transactions
* - start a new transaction every time a new relation is clustered
* - check for validity of the information on to-be-clustered relations,
* as someone might have deleted a relation behind our back, or
* clustered one on a different index
* - end the transaction
* - provide a separate memory context so that we can pass information in
* a way that survives across transactions
* - start a new transaction every time a new relation is clustered
* - check for validity of the information on to-be-clustered relations,
* as someone might have deleted a relation behind our back, or
* clustered one on a different index
* - end the transaction
*
* The single-relation case does not have any such overhead.
*
* We also allow a relation being specified without index. In that case,
* We also allow a relation being specified without index. In that case,
* the indisclustered bit will be looked up, and an ERROR will be thrown
* if there is no index with the bit set.
*---------------------------------------------------------------------------
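As a rough standalone sketch of the strategy in the list above (build the work list in storage that outlives any one transaction, then open and commit a transaction per relation), with plain stubs standing in for the real transaction and memory-context machinery:

#include <stdio.h>

typedef struct RelToCluster
{
	unsigned	tableOid;
	unsigned	indexOid;
} RelToCluster;

/* Hypothetical stand-ins for StartTransactionCommand() and friends. */
static void start_xact(void)  { puts("BEGIN"); }
static void commit_xact(void) { puts("COMMIT"); }

static void
cluster_one(const RelToCluster *r)
{
	printf("cluster table %u using index %u\n", r->tableOid, r->indexOid);
}

int
main(void)
{
	/* In the real code this list lives in a dedicated memory context
	 * so it survives each CommitTransactionCommand(). */
	RelToCluster rvs[] = {{16384, 16390}, {16401, 16407}};

	for (unsigned i = 0; i < sizeof(rvs) / sizeof(rvs[0]); i++)
	{
		start_xact();		/* one transaction per relation */
		cluster_one(&rvs[i]);
		commit_xact();		/* locks released before the next table */
	}
	return 0;
}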
@ -103,10 +103,10 @@ cluster(ClusterStmt *stmt)
if (stmt->relation != NULL)
{
/* This is the single-relation case. */
Oid tableOid,
indexOid = InvalidOid;
Relation rel;
RelToCluster rvtc;
Oid tableOid,
indexOid = InvalidOid;
Relation rel;
RelToCluster rvtc;

/* Find and lock the table */
rel = heap_openrv(stmt->relation, AccessExclusiveLock);
@ -123,10 +123,10 @@ cluster(ClusterStmt *stmt)
List *index;

/* We need to find the index that has indisclustered set. */
foreach (index, RelationGetIndexList(rel))
foreach(index, RelationGetIndexList(rel))
{
HeapTuple idxtuple;
Form_pg_index indexForm;
HeapTuple idxtuple;
Form_pg_index indexForm;

indexOid = lfirsto(index);
idxtuple = SearchSysCache(INDEXRELID,
@ -152,14 +152,17 @@ cluster(ClusterStmt *stmt)
}
else
{
/* The index is expected to be in the same namespace as the relation. */
/*
* The index is expected to be in the same namespace as the
* relation.
*/
indexOid = get_relname_relid(stmt->indexname,
rel->rd_rel->relnamespace);
if (!OidIsValid(indexOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("index \"%s\" for table \"%s\" does not exist",
stmt->indexname, stmt->relation->relname)));
errmsg("index \"%s\" for table \"%s\" does not exist",
stmt->indexname, stmt->relation->relname)));
}

/* All other checks are done in cluster_rel() */
@ -175,16 +178,16 @@ cluster(ClusterStmt *stmt)
else
{
/*
* This is the "multi relation" case. We need to cluster all tables
* that have some index with indisclustered set.
* This is the "multi relation" case. We need to cluster all
* tables that have some index with indisclustered set.
*/
MemoryContext cluster_context;
List *rv,
*rvs;
MemoryContext cluster_context;
List *rv,
*rvs;

/*
* We cannot run this form of CLUSTER inside a user transaction block;
* we'd be holding locks way too long.
* We cannot run this form of CLUSTER inside a user transaction
* block; we'd be holding locks way too long.
*/
PreventTransactionChain((void *) stmt, "CLUSTER");

@ -201,8 +204,8 @@ cluster(ClusterStmt *stmt)
ALLOCSET_DEFAULT_MAXSIZE);

/*
* Build the list of relations to cluster. Note that this lives in
* cluster_context.
* Build the list of relations to cluster. Note that this lives
* in cluster_context.
*/
rvs = get_tables_to_cluster(cluster_context);

@ -210,13 +213,14 @@ cluster(ClusterStmt *stmt)
CommitTransactionCommand();

/* Ok, now that we've got them all, cluster them one by one */
foreach (rv, rvs)
foreach(rv, rvs)
{
RelToCluster *rvtc = (RelToCluster *) lfirst(rv);
RelToCluster *rvtc = (RelToCluster *) lfirst(rv);

/* Start a new transaction for each relation. */
StartTransactionCommand();
SetQuerySnapshot(); /* might be needed for functions in indexes */
SetQuerySnapshot(); /* might be needed for functions in
* indexes */
cluster_rel(rvtc, true);
CommitTransactionCommand();
}
@ -244,7 +248,7 @@ cluster(ClusterStmt *stmt)
* them incrementally while we load the table.
*/
static void
cluster_rel(RelToCluster *rvtc, bool recheck)
cluster_rel(RelToCluster * rvtc, bool recheck)
{
Relation OldHeap,
OldIndex;
@ -256,14 +260,14 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
* Since we may open a new transaction for each relation, we have to
* check that the relation still is what we think it is.
*
* If this is a single-transaction CLUSTER, we can skip these tests.
* We *must* skip the one on indisclustered since it would reject an
* If this is a single-transaction CLUSTER, we can skip these tests. We
* *must* skip the one on indisclustered since it would reject an
* attempt to cluster a not-previously-clustered index.
*/
if (recheck)
{
HeapTuple tuple;
Form_pg_index indexForm;
HeapTuple tuple;
Form_pg_index indexForm;

/*
* Check if the relation and index still exist before opening them
@ -319,10 +323,10 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
RelationGetRelationName(OldHeap))));

/*
* Disallow clustering on incomplete indexes (those that might not index
* every row of the relation). We could relax this by making a separate
* seqscan pass over the table to copy the missing rows, but that seems
* expensive and tedious.
* Disallow clustering on incomplete indexes (those that might not
* index every row of the relation). We could relax this by making a
* separate seqscan pass over the table to copy the missing rows, but
* that seems expensive and tedious.
*/
if (!heap_attisnull(OldIndex->rd_indextuple, Anum_pg_index_indpred))
ereport(ERROR,
@ -334,7 +338,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)

/*
* If the AM doesn't index nulls, then it's a partial index unless
* we can prove all the rows are non-null. Note we only need look
* we can prove all the rows are non-null. Note we only need look
* at the first column; multicolumn-capable AMs are *required* to
* index nulls in columns after the first.
*/
@ -347,7 +351,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot cluster when index access method does not handle nulls"),
errhint("You may be able to work around this by marking column \"%s\" NOT NULL.",
NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))));
NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))));
}
else if (colno < 0)
{
@ -382,7 +386,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
if (isOtherTempNamespace(RelationGetNamespace(OldHeap)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot cluster temp tables of other processes")));
errmsg("cannot cluster temp tables of other processes")));

/* Drop relcache refcnt on OldIndex, but keep lock */
index_close(OldIndex);
@ -397,7 +401,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
* rebuild_relation: rebuild an existing relation
*
* This is shared code between CLUSTER and TRUNCATE. In the TRUNCATE
* case, the new relation is built and left empty. In the CLUSTER case,
* case, the new relation is built and left empty. In the CLUSTER case,
* it is filled with data read from the old relation in the order specified
* by the index.
*
@ -432,6 +436,7 @@ rebuild_relation(Relation OldHeap, Oid indexOid)
snprintf(NewHeapName, sizeof(NewHeapName), "pg_temp_%u", tableOid);

OIDNewHeap = make_new_heap(tableOid, NewHeapName);

/*
* We don't need CommandCounterIncrement() because make_new_heap did
* it.
@ -754,8 +759,8 @@ swap_relfilenodes(Oid r1, Oid r2)

/* swap size statistics too, since new rel has freshly-updated stats */
{
int4 swap_pages;
float4 swap_tuples;
int4 swap_pages;
float4 swap_tuples;

swap_pages = relform1->relpages;
relform1->relpages = relform2->relpages;
@ -857,20 +862,20 @@ swap_relfilenodes(Oid r1, Oid r2)
static List *
get_tables_to_cluster(MemoryContext cluster_context)
{
Relation indRelation;
HeapScanDesc scan;
ScanKeyData entry;
HeapTuple indexTuple;
Form_pg_index index;
MemoryContext old_context;
RelToCluster *rvtc;
List *rvs = NIL;
Relation indRelation;
HeapScanDesc scan;
ScanKeyData entry;
HeapTuple indexTuple;
Form_pg_index index;
MemoryContext old_context;
RelToCluster *rvtc;
List *rvs = NIL;

/*
* Get all indexes that have indisclustered set and are owned by
* appropriate user. System relations or nailed-in relations cannot ever
* have indisclustered set, because CLUSTER will refuse to set it when
* called with one of them as argument.
* appropriate user. System relations or nailed-in relations cannot
* ever have indisclustered set, because CLUSTER will refuse to set it
* when called with one of them as argument.
*/
indRelation = relation_openr(IndexRelationName, AccessShareLock);
ScanKeyEntryInitialize(&entry, 0,
@ -886,8 +891,8 @@ get_tables_to_cluster(MemoryContext cluster_context)
continue;

/*
* We have to build the list in a different memory context so
* it will survive the cross-transaction processing
* We have to build the list in a different memory context so it
* will survive the cross-transaction processing
*/
old_context = MemoryContextSwitchTo(cluster_context);

|
@ -7,7 +7,7 @@
* Copyright (c) 1996-2001, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.67 2003/08/01 00:15:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.68 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -383,8 +383,8 @@ CommentAttribute(List *qualname, char *comment)
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("attribute \"%s\" of relation \"%s\" does not exist",
attrname, RelationGetRelationName(relation))));
errmsg("attribute \"%s\" of relation \"%s\" does not exist",
attrname, RelationGetRelationName(relation))));

/* Create the comment using the relation's oid */

@ -418,16 +418,17 @@ CommentDatabase(List *qualname, char *comment)
database = strVal(lfirst(qualname));

/*
* We cannot currently support cross-database comments (since other DBs
* cannot see pg_description of this database). So, we reject attempts
* to comment on a database other than the current one. Someday this
* might be improved, but it would take a redesigned infrastructure.
* We cannot currently support cross-database comments (since other
* DBs cannot see pg_description of this database). So, we reject
* attempts to comment on a database other than the current one.
* Someday this might be improved, but it would take a redesigned
* infrastructure.
*
* When loading a dump, we may see a COMMENT ON DATABASE for the old name
* of the database. Erroring out would prevent pg_restore from completing
* (which is really pg_restore's fault, but for now we will work around
* the problem here). Consensus is that the best fix is to treat wrong
* database name as a WARNING not an ERROR.
* of the database. Erroring out would prevent pg_restore from
* completing (which is really pg_restore's fault, but for now we will
* work around the problem here). Consensus is that the best fix is
* to treat wrong database name as a WARNING not an ERROR.
*/

/* First get the database OID */
@ -569,7 +570,7 @@ CommentRule(List *qualname, char *comment)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("there are multiple rules \"%s\"", rulename),
errhint("Specify a relation name as well as a rule name.")));
errhint("Specify a relation name as well as a rule name.")));

heap_endscan(scanDesc);
heap_close(RewriteRelation, AccessShareLock);
@ -811,8 +812,8 @@ CommentTrigger(List *qualname, char *comment)
if (!HeapTupleIsValid(triggertuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("trigger \"%s\" for relation \"%s\" does not exist",
trigname, RelationGetRelationName(relation))));
errmsg("trigger \"%s\" for relation \"%s\" does not exist",
trigname, RelationGetRelationName(relation))));

oid = HeapTupleGetOid(triggertuple);

@ -891,7 +892,7 @@ CommentConstraint(List *qualname, char *comment)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("relation \"%s\" has multiple constraints named \"%s\"",
RelationGetRelationName(relation), conName)));
RelationGetRelationName(relation), conName)));
conOid = HeapTupleGetOid(tuple);
}
}
@ -902,8 +903,8 @@ CommentConstraint(List *qualname, char *comment)
if (!OidIsValid(conOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("constraint \"%s\" for relation \"%s\" does not exist",
conName, RelationGetRelationName(relation))));
errmsg("constraint \"%s\" for relation \"%s\" does not exist",
conName, RelationGetRelationName(relation))));

/* Create the comment with the pg_constraint oid */
CreateComments(conOid, RelationGetRelid(pg_constraint), 0, comment);
|
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/conversioncmds.c,v 1.9 2003/08/01 00:15:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/conversioncmds.c,v 1.10 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -148,11 +148,11 @@ RenameConversion(List *name, const char *newname)
0, 0))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("conversion \"%s\" already exists in schema \"%s\"",
newname, get_namespace_name(namespaceOid))));
errmsg("conversion \"%s\" already exists in schema \"%s\"",
newname, get_namespace_name(namespaceOid))));

/* must be owner */
if (!superuser() &&
if (!superuser() &&
((Form_pg_conversion) GETSTRUCT(tup))->conowner != GetUserId())
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CONVERSION,
NameListToString(name));
|
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.205 2003/08/01 00:15:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.206 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -61,7 +61,7 @@ typedef enum CopyDest
COPY_FILE, /* to/from file */
COPY_OLD_FE, /* to/from frontend (old protocol) */
COPY_NEW_FE /* to/from frontend (new protocol) */
} CopyDest;
} CopyDest;

/*
* Represents the type of data returned by CopyReadAttribute()
@ -82,17 +82,17 @@ typedef enum EolType
EOL_NL,
EOL_CR,
EOL_CRNL
} EolType;
} EolType;


/* non-export function prototypes */
static void CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
char *delim, char *null_print);
char *delim, char *null_print);
static void CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
char *delim, char *null_print);
char *delim, char *null_print);
static char *CopyReadAttribute(const char *delim, CopyReadResult *result);
static Datum CopyReadBinaryAttribute(int column_no, FmgrInfo *flinfo,
Oid typelem, bool *isnull);
Oid typelem, bool *isnull);
static void CopyAttributeOut(char *string, char *delim);
static List *CopyGetAttnums(Relation rel, List *attnamelist);

@ -136,6 +136,7 @@ static void CopySendChar(char c);
static void CopySendEndOfRow(bool binary);
static void CopyGetData(void *databuf, int datasize);
static int CopyGetChar(void);

#define CopyGetEof() (fe_eof)
static int CopyPeekChar(void);
static void CopyDonePeek(int c, bool pickup);
@ -155,14 +156,14 @@ SendCopyBegin(bool binary, int natts)
{
/* new way */
StringInfoData buf;
int16 format = (binary ? 1 : 0);
int i;
int16 format = (binary ? 1 : 0);
int i;

pq_beginmessage(&buf, 'H');
pq_sendbyte(&buf, format); /* overall format */
pq_sendbyte(&buf, format); /* overall format */
pq_sendint(&buf, natts, 2);
for (i = 0; i < natts; i++)
pq_sendint(&buf, format, 2); /* per-column formats */
pq_sendint(&buf, format, 2); /* per-column formats */
pq_endmessage(&buf);
copy_dest = COPY_NEW_FE;
copy_msgbuf = makeStringInfo();
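For orientation, the message built above has a simple layout; the sketch below assembles the same payload by hand, assuming the 3.0-protocol CopyOutResponse format as I understand it (an int8 overall format, a big-endian int16 column count, then one int16 format code per column, 0 for text and 1 for binary).

#include <stddef.h>
#include <stdint.h>

static size_t
build_copy_out_response(uint8_t *buf, int16_t natts, int binary)
{
	size_t		off = 0;
	uint16_t	format = binary ? 1 : 0;

	buf[off++] = (uint8_t) format;			/* overall format */
	buf[off++] = (uint8_t) (natts >> 8);	/* int16 column count, */
	buf[off++] = (uint8_t) (natts & 0xFF);	/* big-endian on the wire */
	for (int16_t i = 0; i < natts; i++)
	{
		buf[off++] = (uint8_t) (format >> 8);	/* per-column format code */
		buf[off++] = (uint8_t) (format & 0xFF);
	}
	return off;				/* payload length, excluding type byte and length */
}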
@ -200,14 +201,14 @@ ReceiveCopyBegin(bool binary, int natts)
{
/* new way */
StringInfoData buf;
int16 format = (binary ? 1 : 0);
int i;
int16 format = (binary ? 1 : 0);
int i;

pq_beginmessage(&buf, 'G');
pq_sendbyte(&buf, format); /* overall format */
pq_sendbyte(&buf, format); /* overall format */
pq_sendint(&buf, natts, 2);
for (i = 0; i < natts; i++)
pq_sendint(&buf, format, 2); /* per-column formats */
pq_sendint(&buf, format, 2); /* per-column formats */
pq_endmessage(&buf);
copy_dest = COPY_NEW_FE;
copy_msgbuf = makeStringInfo();
@ -289,7 +290,7 @@ CopySendData(void *databuf, int datasize)
/* no hope of recovering connection sync, so FATAL */
ereport(FATAL,
(errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("connection lost during COPY to stdout")));
errmsg("connection lost during COPY to stdout")));
}
break;
case COPY_NEW_FE:
@ -378,7 +379,7 @@ CopyGetData(void *databuf, int datasize)
case COPY_NEW_FE:
while (datasize > 0 && !fe_eof)
{
int avail;
int avail;

while (copy_msgbuf->cursor >= copy_msgbuf->len)
{
@ -389,24 +390,24 @@ CopyGetData(void *databuf, int datasize)
if (mtype == EOF)
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("unexpected EOF on client connection")));
errmsg("unexpected EOF on client connection")));
if (pq_getmessage(copy_msgbuf, 0))
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("unexpected EOF on client connection")));
errmsg("unexpected EOF on client connection")));
switch (mtype)
{
case 'd': /* CopyData */
case 'd': /* CopyData */
break;
case 'c': /* CopyDone */
case 'c': /* CopyDone */
/* COPY IN correctly terminated by frontend */
fe_eof = true;
return;
case 'f': /* CopyFail */
case 'f': /* CopyFail */
ereport(ERROR,
(errcode(ERRCODE_QUERY_CANCELED),
errmsg("COPY from stdin failed: %s",
pq_getmsgstring(copy_msgbuf))));
pq_getmsgstring(copy_msgbuf))));
break;
default:
ereport(ERROR,
@ -421,7 +422,7 @@ CopyGetData(void *databuf, int datasize)
avail = datasize;
pq_copymsgbytes(copy_msgbuf, databuf, avail);
databuf = (void *) ((char *) databuf + avail);
datasize =- avail;
datasize = -avail;
}
break;
}
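One hunk in this run deserves a note: the old line read datasize =- avail;, an archaic compound-assignment spelling that modern compilers tokenize as = followed by unary minus, so pgindent's datasize = -avail; spells out what the code already did. The operator a reader might expect here, -=, behaves differently; a standalone snippet makes the distinction concrete (no claim is made here about which behavior copy.c intended):

#include <stdio.h>

int
main(void)
{
	int		datasize = 10,
			avail = 4;

	datasize = -avail;	/* what "datasize =- avail;" means to a modern
						 * compiler: plain assignment of -4 */
	printf("%d\n", datasize);

	datasize = 10;
	datasize -= avail;	/* the compound assignment: 10 - 4 = 6 */
	printf("%d\n", datasize);
	return 0;
}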
@ -430,7 +431,7 @@ CopyGetData(void *databuf, int datasize)
static int
CopyGetChar(void)
{
int ch;
int ch;

switch (copy_dest)
{
@ -448,16 +449,16 @@ CopyGetChar(void)
}
break;
case COPY_NEW_FE:
{
unsigned char cc;
{
unsigned char cc;

CopyGetData(&cc, 1);
if (fe_eof)
ch = EOF;
else
ch = cc;
break;
}
CopyGetData(&cc, 1);
if (fe_eof)
ch = EOF;
else
ch = cc;
break;
}
default:
ch = EOF;
break;
@ -479,7 +480,7 @@ CopyGetChar(void)
static int
CopyPeekChar(void)
{
int ch;
int ch;

switch (copy_dest)
{
@ -497,16 +498,16 @@ CopyPeekChar(void)
}
break;
case COPY_NEW_FE:
{
unsigned char cc;
{
unsigned char cc;

CopyGetData(&cc, 1);
if (fe_eof)
ch = EOF;
else
ch = cc;
break;
}
CopyGetData(&cc, 1);
if (fe_eof)
ch = EOF;
else
ch = cc;
break;
}
default:
ch = EOF;
break;
@ -524,7 +525,7 @@ CopyDonePeek(int c, bool pickup)
switch (copy_dest)
{
case COPY_FILE:
if (!pickup)
if (!pickup)
{
/* We don't want to pick it up - so put it back in there */
ungetc(c, copy_file);
@ -537,7 +538,11 @@ CopyDonePeek(int c, bool pickup)
/* We want to pick it up */
(void) pq_getbyte();
}
/* If we didn't want to pick it up, just leave it where it sits */

/*
* If we didn't want to pick it up, just leave it where it
* sits
*/
break;
case COPY_NEW_FE:
if (!pickup)
@ -737,7 +742,7 @@ DoCopy(const CopyStmt *stmt)
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to COPY to or from a file"),
errhint("Anyone can COPY to stdout or from stdin. "
"psql's \\copy command also works for anyone.")));
"psql's \\copy command also works for anyone.")));

/*
* Presently, only single-character delimiter strings are supported.
@ -791,8 +796,8 @@ DoCopy(const CopyStmt *stmt)
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot copy to non-table relation \"%s\"",
RelationGetRelationName(rel))));
errmsg("cannot copy to non-table relation \"%s\"",
RelationGetRelationName(rel))));
}
if (pipe)
{
@ -810,8 +815,8 @@ DoCopy(const CopyStmt *stmt)
if (copy_file == NULL)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not open file \"%s\" for reading: %m",
filename)));
errmsg("could not open file \"%s\" for reading: %m",
filename)));

fstat(fileno(copy_file), &st);
if (S_ISDIR(st.st_mode))
@ -841,8 +846,8 @@ DoCopy(const CopyStmt *stmt)
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot copy from non-table relation \"%s\"",
RelationGetRelationName(rel))));
errmsg("cannot copy from non-table relation \"%s\"",
RelationGetRelationName(rel))));
}
if (pipe)
{
@ -863,7 +868,7 @@ DoCopy(const CopyStmt *stmt)
if (!is_absolute_path(filename))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
errmsg("relative path not allowed for COPY to file")));
errmsg("relative path not allowed for COPY to file")));

oumask = umask((mode_t) 022);
copy_file = AllocateFile(filename, PG_BINARY_W);
@ -872,8 +877,8 @@ DoCopy(const CopyStmt *stmt)
if (copy_file == NULL)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not open file \"%s\" for writing: %m",
filename)));
errmsg("could not open file \"%s\" for writing: %m",
filename)));

fstat(fileno(copy_file), &st);
if (S_ISDIR(st.st_mode))
@ -955,8 +960,8 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
}

/*
* Create a temporary memory context that we can reset once per row
* to recover palloc'd memory. This avoids any problems with leaks
* Create a temporary memory context that we can reset once per row to
* recover palloc'd memory. This avoids any problems with leaks
* inside datatype output routines, and should be faster than retail
* pfree's anyway. (We don't need a whole econtext as CopyFrom does.)
*/
@ -1040,9 +1045,9 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
if (isnull)
{
if (!binary)
CopySendString(null_print); /* null indicator */
CopySendString(null_print); /* null indicator */
else
CopySendInt32(-1); /* null marker */
CopySendInt32(-1); /* null marker */
}
else
{
@ -1060,7 +1065,7 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,

outputbytes = DatumGetByteaP(FunctionCall2(&out_functions[attnum - 1],
value,
ObjectIdGetDatum(elements[attnum - 1])));
ObjectIdGetDatum(elements[attnum - 1])));
/* We assume the result will not have been toasted */
CopySendInt32(VARSIZE(outputbytes) - VARHDRSZ);
CopySendData(VARDATA(outputbytes),
@ -1199,7 +1204,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
{
/* attribute is NOT to be copied from input */
/* use default value if one exists */
Node *defexpr = build_column_default(rel, i + 1);
Node *defexpr = build_column_default(rel, i + 1);

if (defexpr != NULL)
{
@ -1219,10 +1224,10 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
/*
* Easiest way to do this is to use parse_coerce.c to set up
* an expression that checks the constraints. (At present,
* the expression might contain a length-coercion-function call
* and/or CoerceToDomain nodes.) The bottom of the expression
* is a Param node so that we can fill in the actual datum during
* the data input loop.
* the expression might contain a length-coercion-function
* call and/or CoerceToDomain nodes.) The bottom of the
* expression is a Param node so that we can fill in the
* actual datum during the data input loop.
*/
prm = makeNode(Param);
prm->paramkind = PARAM_EXEC;
@ -1241,11 +1246,11 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
}

/*
* Check BEFORE STATEMENT insertion triggers. It's debateable
* whether we should do this for COPY, since it's not really an
* "INSERT" statement as such. However, executing these triggers
* maintains consistency with the EACH ROW triggers that we already
* fire on COPY.
* Check BEFORE STATEMENT insertion triggers. It's debateable whether
* we should do this for COPY, since it's not really an "INSERT"
* statement as such. However, executing these triggers maintains
* consistency with the EACH ROW triggers that we already fire on
* COPY.
*/
ExecBSInsertTriggers(estate, resultRelInfo);

@ -1276,13 +1281,13 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
if ((tmp >> 16) != 0)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("unrecognized critical flags in COPY file header")));
errmsg("unrecognized critical flags in COPY file header")));
/* Header extension length */
tmp = CopyGetInt32();
if (CopyGetEof() || tmp < 0)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("invalid COPY file header (missing length)")));
errmsg("invalid COPY file header (missing length)")));
/* Skip extension header, if present */
while (tmp-- > 0)
{
@ -1290,7 +1295,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
if (CopyGetEof())
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("invalid COPY file header (wrong length)")));
errmsg("invalid COPY file header (wrong length)")));
}
}
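The header checks rewrapped above follow a common pattern for versioned binary formats: unknown bits in the reserved "critical" flags half-word must be rejected, while a length-prefixed extension area may be skipped blindly. A standalone sketch of that pattern, using plain stdio and ignoring the byte-order handling the real COPY format requires:

#include <stdint.h>
#include <stdio.h>

/* Returns 0 on success, -1 on a malformed header. */
static int
check_copy_header(FILE *fp)
{
	int32_t		flags,
				extlen;

	if (fread(&flags, sizeof(flags), 1, fp) != 1)
		return -1;
	if ((flags >> 16) != 0)
		return -1;			/* unrecognized critical flags */
	if (fread(&extlen, sizeof(extlen), 1, fp) != 1 || extlen < 0)
		return -1;			/* invalid header (missing length) */
	while (extlen-- > 0)	/* skip extension area, if present */
		if (getc(fp) == EOF)
			return -1;		/* invalid header (wrong length) */
	return 0;
}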

@ -1418,9 +1423,9 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
/*
* Complain if there are more fields on the input line.
*
* Special case: if we're reading a zero-column table, we
* won't yet have called CopyReadAttribute() at all; so do that
* and check we have an empty line. Fortunately we can keep that
* Special case: if we're reading a zero-column table, we won't
* yet have called CopyReadAttribute() at all; so do that and
* check we have an empty line. Fortunately we can keep that
* silly corner case out of the main line of execution.
*/
if (result == NORMAL_ATTR)
@ -1431,7 +1436,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
if (result == NORMAL_ATTR || *string != '\0')
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("extra data after last expected column")));
errmsg("extra data after last expected column")));
if (result == END_OF_FILE)
{
/* EOF at start of line: all is well */
@ -1442,7 +1447,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
else
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("extra data after last expected column")));
errmsg("extra data after last expected column")));
}

/*
@ -1475,8 +1480,8 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
{
loaded_oid =
DatumGetObjectId(CopyReadBinaryAttribute(0,
&oid_in_function,
oid_in_element,
&oid_in_function,
oid_in_element,
&isnull));
if (isnull || loaded_oid == InvalidOid)
ereport(ERROR,
@ -1531,9 +1536,9 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
prmdata->isnull = (nulls[i] == 'n');

/*
* Execute the constraint expression. Allow the expression
* to replace the value (consider e.g. a timestamp precision
* restriction).
* Execute the constraint expression. Allow the
* expression to replace the value (consider e.g. a
* timestamp precision restriction).
*/
values[i] = ExecEvalExpr(exprstate, econtext,
&isnull, NULL);
@ -1674,11 +1679,12 @@ CopyReadAttribute(const char *delim, CopyReadResult *result)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("literal carriage return found in data"),
errhint("Use \"\\r\" to represent carriage return.")));
/* Check for \r\n on first line, _and_ handle \r\n. */
errhint("Use \"\\r\" to represent carriage return.")));
/* Check for \r\n on first line, _and_ handle \r\n. */
if (copy_lineno == 1 || eol_type == EOL_CRNL)
{
int c2 = CopyPeekChar();
int c2 = CopyPeekChar();

if (c2 == '\n')
{
CopyDonePeek(c2, true); /* eat newline */
@ -1690,9 +1696,13 @@ CopyReadAttribute(const char *delim, CopyReadResult *result)
if (eol_type == EOL_CRNL)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("literal carriage return found in data"),
errmsg("literal carriage return found in data"),
errhint("Use \"\\r\" to represent carriage return.")));
/* if we got here, it is the first line and we didn't get \n, so put it back */

/*
* if we got here, it is the first line and we didn't
* get \n, so put it back
*/
CopyDonePeek(c2, false);
eol_type = EOL_CR;
}
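The comment rewrapped above encodes the lookahead rule for newline-style detection: a CR may end a line by itself or begin a CRLF pair, and a single peeked character decides which style the rest of the input must follow. A self-contained sketch of that decision, with plain stdio standing in for the COPY input layer:

#include <stdio.h>

typedef enum
{
	EOL_UNKNOWN,
	EOL_NL,
	EOL_CR,
	EOL_CRNL
} EolType;

/* c is the character just read; fp supplies one character of lookahead. */
static EolType
classify_eol(FILE *fp, int c)
{
	if (c == '\n')
		return EOL_NL;
	if (c == '\r')
	{
		int		c2 = getc(fp);	/* peek at the next character */

		if (c2 == '\n')
			return EOL_CRNL;	/* consume it: this is CRLF */
		if (c2 != EOF)
			ungetc(c2, fp);		/* put it back: this is a bare CR */
		return EOL_CR;
	}
	return EOL_UNKNOWN;
}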
|
||||
@ -1802,12 +1812,12 @@ CopyReadAttribute(const char *delim, CopyReadResult *result)
|
||||
c = CopyGetChar();
|
||||
if (c == '\n')
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
|
||||
errmsg("end-of-copy marker does not match previous newline style")));
|
||||
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
|
||||
errmsg("end-of-copy marker does not match previous newline style")));
|
||||
if (c != '\r')
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
|
||||
errmsg("end-of-copy marker corrupt")));
|
||||
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
|
||||
errmsg("end-of-copy marker corrupt")));
|
||||
}
|
||||
c = CopyGetChar();
|
||||
if (c != '\r' && c != '\n')
|
||||
@ -1816,21 +1826,20 @@ CopyReadAttribute(const char *delim, CopyReadResult *result)
|
||||
errmsg("end-of-copy marker corrupt")));
|
||||
if ((eol_type == EOL_NL && c != '\n') ||
|
||||
(eol_type == EOL_CRNL && c != '\n') ||
|
||||
(eol_type == EOL_CR && c != '\r'))
|
||||
(eol_type == EOL_CR && c != '\r'))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
|
||||
errmsg("end-of-copy marker does not match previous newline style")));
|
||||
|
||||
/*
|
||||
* In protocol version 3, we should ignore anything after
|
||||
* \. up to the protocol end of copy data. (XXX maybe
|
||||
* better not to treat \. as special?)
|
||||
* In protocol version 3, we should ignore anything
|
||||
* after \. up to the protocol end of copy data. (XXX
|
||||
* maybe better not to treat \. as special?)
|
||||
*/
|
||||
if (copy_dest == COPY_NEW_FE)
|
||||
{
|
||||
while (c != EOF)
|
||||
{
|
||||
c = CopyGetChar();
|
||||
}
|
||||
}
|
||||
*result = END_OF_FILE;
|
||||
goto copy_eof;
|
||||
@ -2045,8 +2054,8 @@ CopyGetAttnums(Relation rel, List *attnamelist)
|
||||
if (intMember(attnum, attnums))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_DUPLICATE_COLUMN),
|
||||
errmsg("attribute \"%s\" specified more than once",
|
||||
name)));
|
||||
errmsg("attribute \"%s\" specified more than once",
|
||||
name)));
|
||||
attnums = lappendi(attnums, attnum);
|
||||
}
|
||||
}
|
||||
|
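The copy.c hunks above all revolve around one piece of state: eol_type, which records the newline style seen on the first line so that later lines and the end-of-copy marker can be checked against it. A minimal standalone sketch of that bookkeeping follows; the function names and the simplified character handling are assumptions for illustration, not the backend's API.

#include <stdio.h>

typedef enum { EOL_UNKNOWN, EOL_NL, EOL_CR, EOL_CRNL } EolType;

/* Classify one line terminator; the first line fixes the style and
 * every later line must match it, mirroring the checks above. */
static int
eol_consistent(EolType *eol, int c, int peek)
{
    EolType seen;

    if (c == '\n')
        seen = EOL_NL;
    else                        /* c == '\r' */
        seen = (peek == '\n') ? EOL_CRNL : EOL_CR;

    if (*eol == EOL_UNKNOWN)
        *eol = seen;            /* first line fixes the style */
    return *eol == seen;        /* later lines must match it */
}

int
main(void)
{
    EolType eol = EOL_UNKNOWN;

    printf("%d\n", eol_consistent(&eol, '\r', '\n'));   /* 1: sets EOL_CRNL */
    printf("%d\n", eol_consistent(&eol, '\n', -1));     /* 0: style mismatch */
    return 0;
}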
@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.119 2003/08/01 00:15:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.120 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -200,7 +200,7 @@ createdb(const CreatedbStmt *stmt)
if (dbpath != NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot use an alternate location on this platform")));
errmsg("cannot use an alternate location on this platform")));
#endif

/*
@ -260,8 +260,8 @@ createdb(const CreatedbStmt *stmt)
if (DatabaseHasActiveBackends(src_dboid, true))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
errmsg("source database \"%s\" is being accessed by other users",
dbtemplate)));
errmsg("source database \"%s\" is being accessed by other users",
dbtemplate)));

/* If encoding is defaulted, use source's encoding */
if (encoding < 0)
@ -345,7 +345,7 @@ createdb(const CreatedbStmt *stmt)
/* Make the symlink, if needed */
if (alt_loc)
{
#ifdef HAVE_SYMLINK /* already throws error above */
#ifdef HAVE_SYMLINK /* already throws error above */
if (symlink(alt_loc, nominal_loc) != 0)
#endif
ereport(ERROR,
@ -450,7 +450,7 @@ dropdb(const char *dbname)
char *nominal_loc;
char dbpath[MAXPGPATH];
Relation pgdbrel;
SysScanDesc pgdbscan;
SysScanDesc pgdbscan;
ScanKeyData key;
HeapTuple tup;

@ -503,8 +503,8 @@ dropdb(const char *dbname)
if (DatabaseHasActiveBackends(db_id, false))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
errmsg("database \"%s\" is being accessed by other users",
dbname)));
errmsg("database \"%s\" is being accessed by other users",
dbname)));

/*
* Find the database's tuple by OID (should be unique).
@ -577,10 +577,13 @@ dropdb(const char *dbname)
void
RenameDatabase(const char *oldname, const char *newname)
{
HeapTuple tup, newtup;
HeapTuple tup,
newtup;
Relation rel;
SysScanDesc scan, scan2;
ScanKeyData key, key2;
SysScanDesc scan,
scan2;
ScanKeyData key,
key2;

/*
* Obtain AccessExclusiveLock so that no new session gets started
@ -610,15 +613,14 @@ RenameDatabase(const char *oldname, const char *newname)
errmsg("current database may not be renamed")));

/*
* Make sure the database does not have active sessions. Might
* not be necessary, but it's consistent with other database
* operations.
* Make sure the database does not have active sessions. Might not be
* necessary, but it's consistent with other database operations.
*/
if (DatabaseHasActiveBackends(HeapTupleGetOid(tup), false))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
errmsg("database \"%s\" is being accessed by other users",
oldname)));
errmsg("database \"%s\" is being accessed by other users",
oldname)));

/* make sure the new name doesn't exist */
ScanKeyEntryInitialize(&key2, 0, Anum_pg_database_datname,
@ -651,10 +653,10 @@ RenameDatabase(const char *oldname, const char *newname)
heap_close(rel, NoLock);

/*
* Force dirty buffers out to disk, so that newly-connecting
* backends will see the renamed database in pg_database right
* away. (They'll see an uncommitted tuple, but they don't care;
* see GetRawDatabaseInfo.)
* Force dirty buffers out to disk, so that newly-connecting backends
* will see the renamed database in pg_database right away. (They'll
* see an uncommitted tuple, but they don't care; see
* GetRawDatabaseInfo.)
*/
BufferSync();
}
@ -671,7 +673,7 @@ AlterDatabaseSet(AlterDatabaseSetStmt *stmt)
newtuple;
Relation rel;
ScanKeyData scankey;
SysScanDesc scan;
SysScanDesc scan;
Datum repl_val[Natts_pg_database];
char repl_null[Natts_pg_database];
char repl_repl[Natts_pg_database];
@ -689,9 +691,9 @@ AlterDatabaseSet(AlterDatabaseSetStmt *stmt)
errmsg("database \"%s\" does not exist", stmt->dbname)));

if (!(superuser()
|| ((Form_pg_database) GETSTRUCT(tuple))->datdba == GetUserId()))
|| ((Form_pg_database) GETSTRUCT(tuple))->datdba == GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE,
stmt->dbname);
stmt->dbname);

MemSet(repl_repl, ' ', sizeof(repl_repl));
repl_repl[Anum_pg_database_datconfig - 1] = 'r';
@ -750,7 +752,7 @@ get_db_info(const char *name, Oid *dbIdP, int4 *ownerIdP,
{
Relation relation;
ScanKeyData scanKey;
SysScanDesc scan;
SysScanDesc scan;
HeapTuple tuple;
bool gottuple;

@ -862,7 +864,7 @@ resolve_alt_dbpath(const char *dbpath, Oid dboid)
#ifndef ALLOW_ABSOLUTE_DBPATHS
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("absolute paths are not allowed as database locations")));
errmsg("absolute paths are not allowed as database locations")));
#endif
prefix = dbpath;
}
@ -874,8 +876,8 @@ resolve_alt_dbpath(const char *dbpath, Oid dboid)
if (!var)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("postmaster environment variable \"%s\" not found",
dbpath)));
errmsg("postmaster environment variable \"%s\" not found",
dbpath)));
if (!is_absolute_path(var))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
@ -955,7 +957,7 @@ get_database_oid(const char *dbname)
{
Relation pg_database;
ScanKeyData entry[1];
SysScanDesc scan;
SysScanDesc scan;
HeapTuple dbtuple;
Oid oid;

@ -993,7 +995,7 @@ get_database_name(Oid dbid)
{
Relation pg_database;
ScanKeyData entry[1];
SysScanDesc scan;
SysScanDesc scan;
HeapTuple dbtuple;
char *result;
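One subtlety in the createdb() hunk above: when HAVE_SYMLINK is not defined, the preprocessor removes the if test entirely and the ereport runs unconditionally, which is what the "already throws error above" comment is pointing at. A compilable model of that control flow, under stated assumptions (make_db_symlink, its braces, and its messages are hypothetical stand-ins, not the backend code):

#include <stdio.h>
#include <stdlib.h>
#ifdef HAVE_SYMLINK
#include <unistd.h>
#endif

static void
make_db_symlink(const char *alt_loc, const char *nominal_loc)
{
#ifdef HAVE_SYMLINK             /* without this, the error is unconditional */
    if (symlink(alt_loc, nominal_loc) != 0)
#endif
    {
        fprintf(stderr, "could not link \"%s\" to \"%s\"\n",
                nominal_loc, alt_loc);
        exit(1);
    }
}

int
main(void)
{
    make_db_symlink("/tmp/altloc", "/tmp/nominal");
    return 0;
}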
@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.82 2003/07/20 21:56:32 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.83 2003/08/04 00:43:16 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@ -51,7 +51,8 @@ case_translate_language_name(const char *input, char *output)
{
int i;

MemSet(output, 0, NAMEDATALEN); /* ensure result Name is zero-filled */
MemSet(output, 0, NAMEDATALEN); /* ensure result Name is
* zero-filled */

for (i = 0; i < NAMEDATALEN - 1 && input[i]; ++i)
output[i] = tolower((unsigned char) input[i]);
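The case_translate_language_name() hunk only rewraps a comment; the logic it documents is a fixed-width, zero-filled lowercase copy. A self-contained sketch of that pattern (NAMEDATALEN is hard-coded here purely for illustration):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

#define NAMEDATALEN 64          /* matches the backend's default */

/* Zero-fill the output first, then copy at most NAMEDATALEN - 1
 * lowercased bytes, so the result is always NUL-terminated. */
static void
lower_language_name(const char *input, char *output)
{
    int i;

    memset(output, 0, NAMEDATALEN);
    for (i = 0; i < NAMEDATALEN - 1 && input[i]; ++i)
        output[i] = (char) tolower((unsigned char) input[i]);
}

int
main(void)
{
    char buf[NAMEDATALEN];

    lower_language_name("PLpgSQL", buf);
    printf("%s\n", buf);        /* prints: plpgsql */
    return 0;
}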
@ -7,7 +7,7 @@
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.111 2003/07/20 21:56:32 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.112 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -45,11 +45,11 @@ typedef struct ExplainState

static void ExplainOneQuery(Query *query, ExplainStmt *stmt,
TupOutputState *tstate);
static double elapsed_time(struct timeval *starttime);
static double elapsed_time(struct timeval * starttime);
static void explain_outNode(StringInfo str,
Plan *plan, PlanState *planstate,
Plan *outer_plan,
int indent, ExplainState *es);
Plan *plan, PlanState * planstate,
Plan *outer_plan,
int indent, ExplainState *es);
static void show_scan_qual(List *qual, bool is_or_qual, const char *qlabel,
int scanrelid, Plan *outer_plan,
StringInfo str, int indent, ExplainState *es);
@ -58,8 +58,8 @@ static void show_upper_qual(List *qual, const char *qlabel,
const char *inner_name, int inner_varno, Plan *inner_plan,
StringInfo str, int indent, ExplainState *es);
static void show_sort_keys(List *tlist, int nkeys, AttrNumber *keycols,
const char *qlabel,
StringInfo str, int indent, ExplainState *es);
const char *qlabel,
StringInfo str, int indent, ExplainState *es);
static Node *make_ors_ands_explicit(List *orclauses);

/*
@ -255,8 +255,8 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt,
}

/*
* Close down the query and free resources. Include time for this
* in the total runtime.
* Close down the query and free resources. Include time for this in
* the total runtime.
*/
gettimeofday(&starttime, NULL);

@ -282,7 +282,7 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt,

/* Compute elapsed time in seconds since given gettimeofday() timestamp */
static double
elapsed_time(struct timeval *starttime)
elapsed_time(struct timeval * starttime)
{
struct timeval endtime;

@ -313,7 +313,7 @@ elapsed_time(struct timeval *starttime)
*/
static void
explain_outNode(StringInfo str,
Plan *plan, PlanState *planstate,
Plan *plan, PlanState * planstate,
Plan *outer_plan,
int indent, ExplainState *es)
{
@ -542,8 +542,8 @@ explain_outNode(StringInfo str,
/*
* If the expression is still a function call, we can get
* the real name of the function. Otherwise, punt (this
* can happen if the optimizer simplified away the function
* call, for example).
* can happen if the optimizer simplified away the
* function call, for example).
*/
if (rte->funcexpr && IsA(rte->funcexpr, FuncExpr))
{
@ -583,15 +583,13 @@ explain_outNode(StringInfo str,
double nloops = planstate->instrument->nloops;

appendStringInfo(str, " (actual time=%.2f..%.2f rows=%.0f loops=%.0f)",
1000.0 * planstate->instrument->startup / nloops,
1000.0 * planstate->instrument->total / nloops,
1000.0 * planstate->instrument->startup / nloops,
1000.0 * planstate->instrument->total / nloops,
planstate->instrument->ntuples / nloops,
planstate->instrument->nloops);
}
else if (es->printAnalyze)
{
appendStringInfo(str, " (never executed)");
}
}
appendStringInfoChar(str, '\n');

@ -709,7 +707,7 @@ explain_outNode(StringInfo str,
foreach(lst, planstate->initPlan)
{
SubPlanState *sps = (SubPlanState *) lfirst(lst);
SubPlan *sp = (SubPlan *) sps->xprstate.expr;
SubPlan *sp = (SubPlan *) sps->xprstate.expr;

es->rtable = sp->rtable;
for (i = 0; i < indent; i++)
@ -807,7 +805,7 @@ explain_outNode(StringInfo str,
foreach(lst, planstate->subPlan)
{
SubPlanState *sps = (SubPlanState *) lfirst(lst);
SubPlan *sp = (SubPlan *) sps->xprstate.expr;
SubPlan *sp = (SubPlan *) sps->xprstate.expr;

es->rtable = sp->rtable;
for (i = 0; i < indent; i++)
@ -865,7 +863,7 @@ show_scan_qual(List *qual, bool is_or_qual, const char *qlabel,
*/
if (outer_plan)
{
Relids varnos = pull_varnos(node);
Relids varnos = pull_varnos(node);

if (bms_is_member(OUTER, varnos))
outercontext = deparse_context_for_subplan("outer",
@ -1037,9 +1035,7 @@ make_ors_ands_explicit(List *orclauses)

FastListInit(&args);
foreach(orptr, orclauses)
{
FastAppend(&args, make_ands_explicit(lfirst(orptr)));
}

return (Node *) make_orclause(FastListValue(&args));
}
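The appendStringInfo() call reindented above carries the EXPLAIN ANALYZE arithmetic: startup and total times are kept in seconds and divided by the loop count, so the printed figures are per-iteration milliseconds. A small sketch of the same computation (the Instrumentation struct here is a simplified stand-in for the executor's):

#include <stdio.h>

/* Per-node counters as gathered by the executor instrumentation. */
typedef struct Instrumentation
{
    double startup;             /* total startup time, in seconds */
    double total;               /* total run time, in seconds */
    double ntuples;             /* rows emitted, summed over loops */
    double nloops;              /* number of times the node ran */
} Instrumentation;

int
main(void)
{
    Instrumentation in = {0.0005, 0.0450, 3000.0, 3.0};

    /* Same arithmetic as the appendStringInfo() call above:
     * convert to milliseconds and average over loops. */
    printf("actual time=%.2f..%.2f rows=%.0f loops=%.0f\n",
           1000.0 * in.startup / in.nloops,
           1000.0 * in.total / in.nloops,
           in.ntuples / in.nloops,
           in.nloops);
    return 0;
}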
@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/functioncmds.c,v 1.31 2003/08/01 00:15:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/functioncmds.c,v 1.32 2003/08/04 00:43:16 momjian Exp $
*
* DESCRIPTION
* These routines take the parse tree and pick out the
@ -80,8 +80,8 @@ compute_return_type(TypeName *returnType, Oid languageOid,
if (languageOid == SQLlanguageId)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("SQL function cannot return shell type %s",
TypeNameToString(returnType))));
errmsg("SQL function cannot return shell type %s",
TypeNameToString(returnType))));
else
ereport(NOTICE,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
@ -147,8 +147,8 @@ compute_parameter_types(List *argTypes, Oid languageOid,
if (parameterCount >= FUNC_MAX_ARGS)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_ARGUMENTS),
errmsg("functions cannot have more than %d arguments",
FUNC_MAX_ARGS)));
errmsg("functions cannot have more than %d arguments",
FUNC_MAX_ARGS)));

toid = LookupTypeName(t);
if (OidIsValid(toid))
@ -159,8 +159,8 @@ compute_parameter_types(List *argTypes, Oid languageOid,
if (languageOid == SQLlanguageId)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("SQL function cannot accept shell type %s",
TypeNameToString(t))));
errmsg("SQL function cannot accept shell type %s",
TypeNameToString(t))));
else
ereport(NOTICE,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
@ -330,8 +330,8 @@ compute_attributes_with_style(List *parameters, bool *isStrict_p, char *volatili
else
ereport(WARNING,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("unrecognized function attribute \"%s\" ignored",
param->defname)));
errmsg("unrecognized function attribute \"%s\" ignored",
param->defname)));
}
}

@ -558,7 +558,7 @@ RemoveFunction(RemoveFuncStmt *stmt)
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is an aggregate function",
NameListToString(functionName)),
errhint("Use DROP AGGREGATE to drop aggregate functions.")));
errhint("Use DROP AGGREGATE to drop aggregate functions.")));

if (((Form_pg_proc) GETSTRUCT(tup))->prolang == INTERNALlanguageId)
{
@ -664,7 +664,7 @@ RenameFunction(List *name, List *argtypes, const char *newname)
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is an aggregate function",
NameListToString(name)),
errhint("Use ALTER AGGREGATE to rename aggregate functions.")));
errhint("Use ALTER AGGREGATE to rename aggregate functions.")));

namespaceOid = procForm->pronamespace;

@ -728,7 +728,7 @@ SetFunctionReturnType(Oid funcOid, Oid newRetType)
elog(ERROR, "cache lookup failed for function %u", funcOid);
procForm = (Form_pg_proc) GETSTRUCT(tup);

if (procForm->prorettype != OPAQUEOID) /* caller messed up */
if (procForm->prorettype != OPAQUEOID) /* caller messed up */
elog(ERROR, "function %u doesn't return OPAQUE", funcOid);

/* okay to overwrite copied tuple */
@ -815,7 +815,7 @@ CreateCast(CreateCastStmt *stmt)
if (sourcetypeid == targettypeid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("source data type and target data type are the same")));
errmsg("source data type and target data type are the same")));

/* No shells, no pseudo-types allowed */
if (!get_typisdefined(sourcetypeid))
@ -878,10 +878,11 @@ CreateCast(CreateCastStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("return data type of cast function must match target data type")));

/*
* Restricting the volatility of a cast function may or may not be
* a good idea in the abstract, but it definitely breaks many old
* user-defined types. Disable this check --- tgl 2/1/03
* user-defined types. Disable this check --- tgl 2/1/03
*/
#ifdef NOT_USED
if (procstruct->provolatile == PROVOLATILE_VOLATILE)
@ -892,7 +893,7 @@ CreateCast(CreateCastStmt *stmt)
if (procstruct->proisagg)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("cast function must not be an aggregate function")));
errmsg("cast function must not be an aggregate function")));
if (procstruct->proretset)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
@ -902,12 +903,12 @@ CreateCast(CreateCastStmt *stmt)
}
else
{
int16 typ1len;
int16 typ2len;
bool typ1byval;
bool typ2byval;
char typ1align;
char typ2align;
int16 typ1len;
int16 typ2len;
bool typ1byval;
bool typ2byval;
char typ1align;
char typ2align;

/* indicates binary coercibility */
funcid = InvalidOid;
@ -924,7 +925,7 @@ CreateCast(CreateCastStmt *stmt)
/*
* Also, insist that the types match as to size, alignment, and
* pass-by-value attributes; this provides at least a crude check
* that they have similar representations. A pair of types that
* that they have similar representations. A pair of types that
* fail this test should certainly not be equated.
*/
get_typlenbyvalalign(sourcetypeid, &typ1len, &typ1byval, &typ1align);
@ -958,9 +959,9 @@ CreateCast(CreateCastStmt *stmt)
relation = heap_openr(CastRelationName, RowExclusiveLock);

/*
* Check for duplicate. This is just to give a friendly error message,
* the unique index would catch it anyway (so no need to sweat about
* race conditions).
* Check for duplicate. This is just to give a friendly error
* message, the unique index would catch it anyway (so no need to
* sweat about race conditions).
*/
tuple = SearchSysCache(CASTSOURCETARGET,
ObjectIdGetDatum(sourcetypeid),
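For the binary-coercibility branch of CreateCast() above, the "crude check" is a plain field-by-field comparison of the physical properties that get_typlenbyvalalign() reports. A sketch under that assumption (TypePhys is an invented stand-in for those three fields, not a backend type):

#include <stdbool.h>
#include <stdio.h>

/* Simplified physical description of a type: length, pass-by-value
 * flag, and alignment, as returned by get_typlenbyvalalign(). */
typedef struct TypePhys
{
    short len;
    bool  byval;
    char  align;
} TypePhys;

/* A WITHOUT FUNCTION cast is only plausible when both types share
 * size, pass-by-value-ness, and alignment. */
static bool
binary_coercion_plausible(TypePhys a, TypePhys b)
{
    return a.len == b.len && a.byval == b.byval && a.align == b.align;
}

int
main(void)
{
    TypePhys text_like = {-1, false, 'i'};      /* varlena */
    TypePhys int4_like = {4, true, 'i'};

    printf("%d\n", binary_coercion_plausible(text_like, text_like));   /* 1 */
    printf("%d\n", binary_coercion_plausible(text_like, int4_like));   /* 0 */
    return 0;
}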
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.103 2003/08/01 00:15:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.104 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -44,11 +44,11 @@
/* non-export function prototypes */
static void CheckPredicate(List *predList);
static void ComputeIndexAttrs(IndexInfo *indexInfo, Oid *classOidP,
List *attList,
Oid relId,
char *accessMethodName, Oid accessMethodId);
List *attList,
Oid relId,
char *accessMethodName, Oid accessMethodId);
static Oid GetIndexOpClass(List *opclass, Oid attrType,
char *accessMethodName, Oid accessMethodId);
char *accessMethodName, Oid accessMethodId);
static Oid GetDefaultOpClass(Oid attrType, Oid accessMethodId);

/*
@ -157,8 +157,8 @@ DefineIndex(RangeVar *heapRelation,
if (unique && !accessMethodForm->amcanunique)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("access method \"%s\" does not support UNIQUE indexes",
accessMethodName)));
errmsg("access method \"%s\" does not support UNIQUE indexes",
accessMethodName)));
if (numberOfAttributes > 1 && !accessMethodForm->amcanmulticol)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@ -192,16 +192,16 @@ DefineIndex(RangeVar *heapRelation,
}

/*
* Check that all of the attributes in a primary key are marked
* as not null, otherwise attempt to ALTER TABLE .. SET NOT NULL
* Check that all of the attributes in a primary key are marked as not
* null, otherwise attempt to ALTER TABLE .. SET NOT NULL
*/
if (primary)
{
List *keys;
List *keys;

foreach(keys, attributeList)
{
IndexElem *key = (IndexElem *) lfirst(keys);
IndexElem *key = (IndexElem *) lfirst(keys);
HeapTuple atttuple;

if (!key->name)
@ -216,15 +216,16 @@ DefineIndex(RangeVar *heapRelation,
atttuple = SearchSysCacheAttName(relationId, key->name);
if (HeapTupleIsValid(atttuple))
{
if (! ((Form_pg_attribute) GETSTRUCT(atttuple))->attnotnull)
if (!((Form_pg_attribute) GETSTRUCT(atttuple))->attnotnull)
{
/*
* Try to make it NOT NULL.
*
* XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade
* to child tables? Currently, since the PRIMARY KEY
* itself doesn't cascade, we don't cascade the notnull
* constraint either; but this is pretty debatable.
* itself doesn't cascade, we don't cascade the
* notnull constraint either; but this is pretty
* debatable.
*/
AlterTableAlterColumnSetNotNull(relationId, false,
key->name);
@ -236,8 +237,8 @@ DefineIndex(RangeVar *heapRelation,
/* This shouldn't happen if parser did its job ... */
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("column \"%s\" named in key does not exist",
key->name)));
errmsg("column \"%s\" named in key does not exist",
key->name)));
}
}
}
@ -248,7 +249,7 @@ DefineIndex(RangeVar *heapRelation,
*/
indexInfo = makeNode(IndexInfo);
indexInfo->ii_NumIndexAttrs = numberOfAttributes;
indexInfo->ii_Expressions = NIL; /* for now */
indexInfo->ii_Expressions = NIL; /* for now */
indexInfo->ii_ExpressionsState = NIL;
indexInfo->ii_Predicate = cnfPred;
indexInfo->ii_PredicateState = NIL;
@ -308,7 +309,7 @@ CheckPredicate(List *predList)
if (contain_mutable_functions((Node *) predList))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("functions in index predicate must be marked IMMUTABLE")));
errmsg("functions in index predicate must be marked IMMUTABLE")));
}

static void
@ -351,7 +352,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
else if (attribute->expr && IsA(attribute->expr, Var))
{
/* Tricky tricky, he wrote (column) ... treat as simple attr */
Var *var = (Var *) attribute->expr;
Var *var = (Var *) attribute->expr;

indexInfo->ii_KeyAttrNumbers[attn] = var->varattno;
atttype = get_atttype(relId, var->varattno);
@ -360,30 +361,30 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
{
/* Index expression */
Assert(attribute->expr != NULL);
indexInfo->ii_KeyAttrNumbers[attn] = 0; /* marks expression */
indexInfo->ii_KeyAttrNumbers[attn] = 0; /* marks expression */
indexInfo->ii_Expressions = lappend(indexInfo->ii_Expressions,
attribute->expr);
atttype = exprType(attribute->expr);

/*
* We don't currently support generation of an actual query plan
* for an index expression, only simple scalar expressions;
* hence these restrictions.
* We don't currently support generation of an actual query
* plan for an index expression, only simple scalar
* expressions; hence these restrictions.
*/
if (contain_subplans(attribute->expr))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot use sub-select in index expression")));
errmsg("cannot use sub-select in index expression")));
if (contain_agg_clause(attribute->expr))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
errmsg("cannot use aggregate in index expression")));
errmsg("cannot use aggregate in index expression")));

/*
* A expression using mutable functions is probably wrong,
* since if you aren't going to get the same result for the same
* data every time, it's not clear what the index entries mean at
* all.
* since if you aren't going to get the same result for the
* same data every time, it's not clear what the index entries
* mean at all.
*/
if (contain_mutable_functions(attribute->expr))
ereport(ERROR,
@ -413,21 +414,20 @@ GetIndexOpClass(List *opclass, Oid attrType,
opInputType;

/*
* Release 7.0 removed network_ops, timespan_ops, and
* datetime_ops, so we ignore those opclass names
* so the default *_ops is used. This can be
* removed in some later release. bjm 2000/02/07
* Release 7.0 removed network_ops, timespan_ops, and datetime_ops, so
* we ignore those opclass names so the default *_ops is used. This
* can be removed in some later release. bjm 2000/02/07
*
* Release 7.1 removes lztext_ops, so suppress that too
* for a while. tgl 2000/07/30
* Release 7.1 removes lztext_ops, so suppress that too for a while. tgl
* 2000/07/30
*
* Release 7.2 renames timestamp_ops to timestamptz_ops,
* so suppress that too for awhile. I'm starting to
* think we need a better approach. tgl 2000/10/01
* Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that
* too for awhile. I'm starting to think we need a better approach.
* tgl 2000/10/01
*/
if (length(opclass) == 1)
{
char *claname = strVal(lfirst(opclass));
char *claname = strVal(lfirst(opclass));

if (strcmp(claname, "network_ops") == 0 ||
strcmp(claname, "timespan_ops") == 0 ||
@ -499,8 +499,8 @@ GetIndexOpClass(List *opclass, Oid attrType,
if (!IsBinaryCoercible(attrType, opInputType))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("operator class \"%s\" does not accept data type %s",
NameListToString(opclass), format_type_be(attrType))));
errmsg("operator class \"%s\" does not accept data type %s",
NameListToString(opclass), format_type_be(attrType))));

ReleaseSysCache(tuple);

@ -607,7 +607,7 @@ ReindexIndex(RangeVar *indexRelation, bool force /* currently unused */ )
tuple = SearchSysCache(RELOID,
ObjectIdGetDatum(indOid),
0, 0, 0);
if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
elog(ERROR, "cache lookup failed for relation %u", indOid);

if (((Form_pg_class) GETSTRUCT(tuple))->relkind != RELKIND_INDEX)
@ -785,7 +785,8 @@ ReindexDatabase(const char *dbname, bool force, bool all)
for (i = 0; i < relcnt; i++)
{
StartTransactionCommand();
SetQuerySnapshot(); /* might be needed for functions in indexes */
SetQuerySnapshot(); /* might be needed for functions in
* indexes */
if (reindex_relation(relids[i], force))
ereport(NOTICE,
(errmsg("relation %u was reindexed", relids[i])));
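The ComputeIndexAttrs() hunks above gate index expressions behind three containment tests, each with its own error. A toy restatement of that decision ladder (IndexExprProps and the third message string are illustrative inventions, not the backend's types or exact wording):

#include <stdbool.h>
#include <stdio.h>

/* Toy summary of one candidate index expression; the three flags
 * correspond to contain_subplans(), contain_agg_clause() and
 * contain_mutable_functions() in the hunk above. */
typedef struct IndexExprProps
{
    bool has_subplan;
    bool has_aggregate;
    bool has_mutable_func;
} IndexExprProps;

static const char *
index_expr_problem(IndexExprProps p)
{
    if (p.has_subplan)
        return "cannot use sub-select in index expression";
    if (p.has_aggregate)
        return "cannot use aggregate in index expression";
    if (p.has_mutable_func)
        return "functions in index expression must be marked IMMUTABLE";
    return NULL;                /* acceptable */
}

int
main(void)
{
    IndexExprProps p = {false, false, true};
    const char *msg = index_expr_problem(p);

    printf("%s\n", msg ? msg : "ok");
    return 0;
}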
@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/opclasscmds.c,v 1.15 2003/08/01 00:15:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/opclasscmds.c,v 1.16 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -103,13 +103,13 @@ DefineOpClass(CreateOpClassStmt *stmt)
* Currently, we require superuser privileges to create an opclass.
* This seems necessary because we have no way to validate that the
* offered set of operators and functions are consistent with the AM's
* expectations. It would be nice to provide such a check someday,
* if it can be done without solving the halting problem :-(
* expectations. It would be nice to provide such a check someday, if
* it can be done without solving the halting problem :-(
*/
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to create an operator class")));
errmsg("must be superuser to create an operator class")));

/* Look up the datatype */
typeoid = typenameTypeId(stmt->datatype);
@ -157,8 +157,8 @@ DefineOpClass(CreateOpClassStmt *stmt)
if (operators[item->number - 1] != InvalidOid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("operator number %d appears more than once",
item->number)));
errmsg("operator number %d appears more than once",
item->number)));
if (item->args != NIL)
{
TypeName *typeName1 = (TypeName *) lfirst(item->args);
@ -211,7 +211,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
if (OidIsValid(storageoid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("storage type specified more than once")));
errmsg("storage type specified more than once")));
storageoid = typenameTypeId(item->storedtype);
break;
default:
@ -532,7 +532,7 @@ RemoveOpClass(RemoveOpClassStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator class \"%s\" does not exist for access method \"%s\"",
NameListToString(stmt->opclassname), stmt->amname)));
NameListToString(stmt->opclassname), stmt->amname)));

opcID = HeapTupleGetOid(tuple);

@ -681,7 +681,7 @@ RenameOpClass(List *name, const char *access_method, const char *newname)
tup = SearchSysCacheCopy(CLAOID,
ObjectIdGetDatum(opcOid),
0, 0, 0);
if (!HeapTupleIsValid(tup)) /* should not happen */
if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "cache lookup failed for opclass %u", opcOid);

namespaceOid = ((Form_pg_opclass) GETSTRUCT(tup))->opcnamespace;
@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/operatorcmds.c,v 1.10 2003/08/01 00:15:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/operatorcmds.c,v 1.11 2003/08/04 00:43:16 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@ -103,7 +103,7 @@ DefineOperator(List *names, List *parameters)
if (typeName1->setof)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("setof type not allowed for operator argument")));
errmsg("setof type not allowed for operator argument")));
}
else if (strcasecmp(defel->defname, "rightarg") == 0)
{
@ -111,7 +111,7 @@ DefineOperator(List *names, List *parameters)
if (typeName2->setof)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("setof type not allowed for operator argument")));
errmsg("setof type not allowed for operator argument")));
}
else if (strcasecmp(defel->defname, "procedure") == 0)
functionName = defGetQualifiedName(defel);
@ -4,17 +4,17 @@
* Utility commands affecting portals (that is, SQL cursor commands)
*
* Note: see also tcop/pquery.c, which implements portal operations for
* the FE/BE protocol. This module uses pquery.c for some operations.
* the FE/BE protocol. This module uses pquery.c for some operations.
* And both modules depend on utils/mmgr/portalmem.c, which controls
* storage management for portals (but doesn't run any queries in them).
*
*
*
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/portalcmds.c,v 1.19 2003/08/01 13:53:36 petere Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/portalcmds.c,v 1.20 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -36,7 +36,7 @@
* Execute SQL DECLARE CURSOR command.
*/
void
PerformCursorOpen(DeclareCursorStmt *stmt)
PerformCursorOpen(DeclareCursorStmt * stmt)
{
List *rewritten;
Query *query;
@ -64,7 +64,8 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
/*
* The query has been through parse analysis, but not rewriting or
* planning as yet. Note that the grammar ensured we have a SELECT
* query, so we are not expecting rule rewriting to do anything strange.
* query, so we are not expecting rule rewriting to do anything
* strange.
*/
rewritten = QueryRewrite((Query *) stmt->query);
if (length(rewritten) != 1 || !IsA(lfirst(rewritten), Query))
@ -86,8 +87,9 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
plan = planner(query, true, stmt->options);

/*
* Create a portal and copy the query and plan into its memory context.
* (If a duplicate cursor name already exists, warn and drop it.)
* Create a portal and copy the query and plan into its memory
* context. (If a duplicate cursor name already exists, warn and drop
* it.)
*/
portal = CreatePortal(stmt->portalname, true, false);

@ -98,7 +100,7 @@ PerformCursorOpen(DeclareCursorStmt *stmt)

PortalDefineQuery(portal,
NULL, /* unfortunately don't have sourceText */
"SELECT", /* cursor's query is always a SELECT */
"SELECT", /* cursor's query is always a SELECT */
makeList1(query),
makeList1(plan),
PortalGetHeapMemory(portal));
@ -108,9 +110,9 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
/*
* Set up options for portal.
*
* If the user didn't specify a SCROLL type, allow or disallow
* scrolling based on whether it would require any additional
* runtime overhead to do so.
* If the user didn't specify a SCROLL type, allow or disallow scrolling
* based on whether it would require any additional runtime overhead
* to do so.
*/
portal->cursorOptions = stmt->options;
if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
@ -129,8 +131,8 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
Assert(portal->strategy == PORTAL_ONE_SELECT);

/*
* We're done; the query won't actually be run until PerformPortalFetch
* is called.
* We're done; the query won't actually be run until
* PerformPortalFetch is called.
*/
}

@ -169,7 +171,7 @@ PerformPortalFetch(FetchStmt *stmt,
/* FIXME: shouldn't this be an ERROR? */
ereport(WARNING,
(errcode(ERRCODE_UNDEFINED_CURSOR),
errmsg("portal \"%s\" does not exist", stmt->portalname)));
errmsg("portal \"%s\" does not exist", stmt->portalname)));
if (completionTag)
strcpy(completionTag, stmt->ismove ? "MOVE 0" : "FETCH 0");
return;
@ -219,7 +221,7 @@ PerformPortalClose(const char *name)
ereport(WARNING,
(errcode(ERRCODE_UNDEFINED_CURSOR),
errmsg("portal \"%s\" does not exist", name),
errfunction("PerformPortalClose"))); /* for ecpg */
errfunction("PerformPortalClose"))); /* for ecpg */
return;
}

@ -249,7 +251,8 @@ PortalCleanup(Portal portal, bool isError)
/*
* Shut down executor, if still running. We skip this during error
* abort, since other mechanisms will take care of releasing executor
* resources, and we can't be sure that ExecutorEnd itself wouldn't fail.
* resources, and we can't be sure that ExecutorEnd itself wouldn't
* fail.
*/
queryDesc = PortalGetQueryDesc(portal);
if (queryDesc)
@ -271,14 +274,14 @@ PortalCleanup(Portal portal, bool isError)
void
PersistHoldablePortal(Portal portal)
{
QueryDesc *queryDesc = PortalGetQueryDesc(portal);
QueryDesc *queryDesc = PortalGetQueryDesc(portal);
MemoryContext savePortalContext;
MemoryContext saveQueryContext;
MemoryContext oldcxt;

/*
* If we're preserving a holdable portal, we had better be
* inside the transaction that originally created it.
* If we're preserving a holdable portal, we had better be inside the
* transaction that originally created it.
*/
Assert(portal->createXact == GetCurrentTransactionId());
Assert(queryDesc != NULL);
@ -321,9 +324,8 @@ PersistHoldablePortal(Portal portal)
MemoryContextSwitchTo(PortalContext);

/*
* Rewind the executor: we need to store the entire result set in
* the tuplestore, so that subsequent backward FETCHs can be
* processed.
* Rewind the executor: we need to store the entire result set in the
* tuplestore, so that subsequent backward FETCHs can be processed.
*/
ExecutorRewind(queryDesc);

@ -351,17 +353,17 @@ PersistHoldablePortal(Portal portal)
/*
* Reset the position in the result set: ideally, this could be
* implemented by just skipping straight to the tuple # that we need
* to be at, but the tuplestore API doesn't support that. So we
* start at the beginning of the tuplestore and iterate through it
* until we reach where we need to be. FIXME someday?
* to be at, but the tuplestore API doesn't support that. So we start
* at the beginning of the tuplestore and iterate through it until we
* reach where we need to be. FIXME someday?
*/
MemoryContextSwitchTo(portal->holdContext);

if (!portal->atEnd)
{
long store_pos;
long store_pos;

if (portal->posOverflow) /* oops, cannot trust portalPos */
if (portal->posOverflow) /* oops, cannot trust portalPos */
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not reposition held cursor")));
@ -370,8 +372,8 @@ PersistHoldablePortal(Portal portal)

for (store_pos = 0; store_pos < portal->portalPos; store_pos++)
{
HeapTuple tup;
bool should_free;
HeapTuple tup;
bool should_free;

tup = tuplestore_gettuple(portal->holdStore, true,
&should_free);
@ -389,8 +391,8 @@ PersistHoldablePortal(Portal portal)
/*
* We can now release any subsidiary memory of the portal's heap
* context; we'll never use it again. The executor already dropped
* its context, but this will clean up anything that glommed onto
* the portal's heap via PortalContext.
* its context, but this will clean up anything that glommed onto the
* portal's heap via PortalContext.
*/
MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
}
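PersistHoldablePortal() above repositions by brute force because the tuplestore API offers no random access: it re-reads from the start until it has consumed portalPos tuples. A skeleton of that loop; read_next_tuple() is a hypothetical stand-in for tuplestore_gettuple(), and the fixed store size is a test fixture.

#include <stdio.h>

static int
read_next_tuple(long pos)
{
    return pos < 100;           /* pretend the store holds 100 tuples */
}

/* Re-read from the beginning until target_pos tuples have been
 * consumed, mirroring the for loop in the hunk above. */
static int
reposition(long target_pos)
{
    long store_pos;

    for (store_pos = 0; store_pos < target_pos; store_pos++)
    {
        if (!read_next_tuple(store_pos))
            return 0;           /* unexpected end of tuplestore */
    }
    return 1;
}

int
main(void)
{
    printf("%d\n", reposition(42));     /* 1: reached position 42 */
    printf("%d\n", reposition(200));    /* 0: ran off the end */
    return 0;
}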
@ -10,7 +10,7 @@
* Copyright (c) 2002-2003, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/prepare.c,v 1.21 2003/07/28 00:09:14 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/prepare.c,v 1.22 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -39,7 +39,7 @@ static HTAB *prepared_queries = NULL;

static void InitQueryHashTable(void);
static ParamListInfo EvaluateParams(EState *estate,
List *params, List *argtypes);
List *params, List *argtypes);

/*
* Implements the 'PREPARE' utility statement.
@ -90,12 +90,12 @@ PrepareQuery(PrepareStmt *stmt)
/* Rewrite the query. The result could be 0, 1, or many queries. */
query_list = QueryRewrite(stmt->query);

/* Generate plans for queries. Snapshot is already set. */
/* Generate plans for queries. Snapshot is already set. */
plan_list = pg_plan_queries(query_list, false);

/* Save the results. */
StorePreparedStatement(stmt->name,
NULL, /* text form not available */
NULL, /* text form not available */
commandTag,
query_list,
plan_list,
@ -131,8 +131,8 @@ ExecuteQuery(ExecuteStmt *stmt, DestReceiver *dest)
if (entry->argtype_list != NIL)
{
/*
* Need an EState to evaluate parameters; must not delete it
* till end of query, in case parameters are pass-by-reference.
* Need an EState to evaluate parameters; must not delete it till
* end of query, in case parameters are pass-by-reference.
*/
estate = CreateExecutorState();
paramLI = EvaluateParams(estate, stmt->params, entry->argtype_list);
@ -144,15 +144,15 @@ ExecuteQuery(ExecuteStmt *stmt, DestReceiver *dest)
portal = CreateNewPortal();

/*
* For CREATE TABLE / AS EXECUTE, make a copy of the stored query
* so that we can modify its destination (yech, but this has
* always been ugly). For regular EXECUTE we can just use the
* stored query where it sits, since the executor is read-only.
* For CREATE TABLE / AS EXECUTE, make a copy of the stored query so
* that we can modify its destination (yech, but this has always been
* ugly). For regular EXECUTE we can just use the stored query where
* it sits, since the executor is read-only.
*/
if (stmt->into)
{
MemoryContext oldContext;
Query *query;
Query *query;

oldContext = MemoryContextSwitchTo(PortalGetHeapMemory(portal));

@ -208,11 +208,11 @@ ExecuteQuery(ExecuteStmt *stmt, DestReceiver *dest)
static ParamListInfo
EvaluateParams(EState *estate, List *params, List *argtypes)
{
int nargs = length(argtypes);
ParamListInfo paramLI;
List *exprstates;
List *l;
int i = 0;
int nargs = length(argtypes);
ParamListInfo paramLI;
List *exprstates;
List *l;
int i = 0;

/* Parser should have caught this error, but check for safety */
if (length(params) != nargs)
@ -229,7 +229,7 @@ EvaluateParams(EState *estate, List *params, List *argtypes)
bool isNull;

paramLI[i].value = ExecEvalExprSwitchContext(n,
GetPerTupleExprContext(estate),
GetPerTupleExprContext(estate),
&isNull,
NULL);
paramLI[i].kind = PARAM_NUM;
@ -273,7 +273,7 @@ InitQueryHashTable(void)
* to the hash entry, so the caller can dispose of their copy.
*
* Exception: commandTag is presumed to be a pointer to a constant string,
* or possibly NULL, so it need not be copied. Note that commandTag should
* or possibly NULL, so it need not be copied. Note that commandTag should
* be NULL only if the original query (before rewriting) was empty.
*/
void
@ -367,9 +367,9 @@ FetchPreparedStatement(const char *stmt_name, bool throwError)
if (prepared_queries)
{
/*
* We can't just use the statement name as supplied by the user: the
* hash package is picky enough that it needs to be NULL-padded out to
* the appropriate length to work correctly.
* We can't just use the statement name as supplied by the user:
* the hash package is picky enough that it needs to be
* NULL-padded out to the appropriate length to work correctly.
*/
MemSet(key, 0, sizeof(key));
strncpy(key, stmt_name, sizeof(key));
@ -412,9 +412,9 @@ FetchPreparedStatementParams(const char *stmt_name)
* Note: the result is created or copied into current memory context.
*/
TupleDesc
FetchPreparedStatementResultDesc(PreparedStatement *stmt)
FetchPreparedStatementResultDesc(PreparedStatement * stmt)
{
Query *query;
Query *query;

switch (ChoosePortalStrategy(stmt->query_list))
{
@ -476,7 +476,7 @@ DropPreparedStatement(const char *stmt_name, bool showError)
void
ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
{
ExecuteStmt *execstmt = (ExecuteStmt *) stmt->query->utilityStmt;
ExecuteStmt *execstmt = (ExecuteStmt *) stmt->query->utilityStmt;
PreparedStatement *entry;
List *l,
*query_list,
@ -499,8 +499,8 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
if (entry->argtype_list != NIL)
{
/*
* Need an EState to evaluate parameters; must not delete it
* till end of query, in case parameters are pass-by-reference.
* Need an EState to evaluate parameters; must not delete it till
* end of query, in case parameters are pass-by-reference.
*/
estate = CreateExecutorState();
paramLI = EvaluateParams(estate, execstmt->params,
@ -510,8 +510,8 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
/* Explain each query */
foreach(l, query_list)
{
Query *query = (Query *) lfirst(l);
Plan *plan = (Plan *) lfirst(plan_list);
Query *query = (Query *) lfirst(l);
Plan *plan = (Plan *) lfirst(plan_list);
bool is_last_query;

plan_list = lnext(plan_list);
@ -533,7 +533,7 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
if (query->commandType != CMD_SELECT)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("prepared statement is not a SELECT")));
errmsg("prepared statement is not a SELECT")));

/* Copy the query so we can modify it */
query = copyObject(query);
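The FetchPreparedStatement() comment reformatted above is about hash-key hygiene: the hash package hashes all NAMEDATALEN bytes of the key, so the buffer must be zeroed before the (usually shorter) statement name is copied in, or equal names would hash differently. A runnable sketch of that convention, with NAMEDATALEN hard-coded for illustration:

#include <stdio.h>
#include <string.h>

#define NAMEDATALEN 64          /* fixed hash-key width, as in the backend */

/* Zero the whole key, then copy the name; trailing bytes stay NUL, so
 * two equal names always produce byte-identical keys. */
static void
make_stmt_key(char key[NAMEDATALEN], const char *stmt_name)
{
    memset(key, 0, NAMEDATALEN);
    strncpy(key, stmt_name, NAMEDATALEN);
}

int
main(void)
{
    char a[NAMEDATALEN];
    char b[NAMEDATALEN];

    make_stmt_key(a, "myplan");
    make_stmt_key(b, "myplan");
    printf("%d\n", memcmp(a, b, NAMEDATALEN) == 0);     /* prints 1 */
    return 0;
}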
@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/proclang.c,v 1.47 2003/08/01 00:15:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/proclang.c,v 1.48 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -60,7 +60,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to create procedural language")));
errmsg("must be superuser to create procedural language")));

/*
* Translate the language name and check that this language doesn't
@ -85,7 +85,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
if (funcrettype != LANGUAGE_HANDLEROID)
{
/*
* We allow OPAQUE just so we can load old dump files. When we
* We allow OPAQUE just so we can load old dump files. When we
* see a handler function declared OPAQUE, change it to
* LANGUAGE_HANDLER.
*/
@ -183,7 +183,7 @@ DropProceduralLanguage(DropPLangStmt *stmt)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to drop procedural language")));
errmsg("must be superuser to drop procedural language")));

/*
* Translate the language name, check that this language exist and is
@ -225,7 +225,7 @@ DropProceduralLanguageById(Oid langOid)
langTup = SearchSysCache(LANGOID,
ObjectIdGetDatum(langOid),
0, 0, 0);
if (!HeapTupleIsValid(langTup)) /* should not happen */
if (!HeapTupleIsValid(langTup)) /* should not happen */
elog(ERROR, "cache lookup failed for language %u", langOid);

simple_heap_delete(rel, &langTup->t_self);
@ -266,7 +266,7 @@ RenameLanguage(const char *oldname, const char *newname)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to rename procedural language")));
errmsg("must be superuser to rename procedural language")));

/* rename */
namestrcpy(&(((Form_pg_language) GETSTRUCT(tup))->lanname), newname);
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/schemacmds.c,v 1.14 2003/08/01 00:15:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/schemacmds.c,v 1.15 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -98,7 +98,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable schema name \"%s\"", schemaName),
errdetail("The prefix \"pg_\" is reserved for system schemas.")));
errdetail("The prefix \"pg_\" is reserved for system schemas.")));

/* Create the schema's namespace */
namespaceId = NamespaceCreate(schemaName, owner_userid);
@ -215,7 +215,7 @@ RemoveSchemaById(Oid schemaOid)
tup = SearchSysCache(NAMESPACEOID,
ObjectIdGetDatum(schemaOid),
0, 0, 0);
if (!HeapTupleIsValid(tup)) /* should not happen */
if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "cache lookup failed for namespace %u", schemaOid);

simple_heap_delete(relation, &tup->t_self);
@ -248,9 +248,9 @@ RenameSchema(const char *oldname, const char *newname)

/* make sure the new name doesn't exist */
if (HeapTupleIsValid(
SearchSysCache(NAMESPACENAME,
CStringGetDatum(newname),
0, 0, 0)))
SearchSysCache(NAMESPACENAME,
CStringGetDatum(newname),
0, 0, 0)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_SCHEMA),
errmsg("schema \"%s\" already exists", newname)));
@ -270,7 +270,7 @@ RenameSchema(const char *oldname, const char *newname)
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable schema name \"%s\"", newname),
errdetail("The prefix \"pg_\" is reserved for system schemas.")));
errdetail("The prefix \"pg_\" is reserved for system schemas.")));

/* rename */
namestrcpy(&(((Form_pg_namespace) GETSTRUCT(tup))->nspname), newname);
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.99 2003/08/01 00:15:19 tgl Exp $
|
||||
 * $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.100 2003/08/04 00:43:17 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -68,7 +68,7 @@ static SeqTable seqtab = NULL;	/* Head of list of SeqTable items */

static void init_sequence(RangeVar *relation,
			  SeqTable *p_elm, Relation *p_rel);
			  SeqTable *p_elm, Relation *p_rel);
static Form_pg_sequence read_info(SeqTable elm, Relation rel, Buffer *buf);
static void init_params(List *options, Form_pg_sequence new);
static void do_setval(RangeVar *sequence, int64 next, bool iscalled);
@ -97,10 +97,10 @@ DefineSequence(CreateSeqStmt *seq)
	/* Values are NULL (or false) by default */
	new.last_value = 0;
	new.increment_by = 0;
	new.max_value = 0;
	new.max_value = 0;
	new.min_value = 0;
	new.cache_value = 0;
	new.is_cycled = false;
	new.is_cycled = false;

	/* Check and set values */
	init_params(seq->options, &new);
@ -299,10 +299,10 @@ DefineSequence(CreateSeqStmt *seq)
/*
 * AlterSequence
 *
 * Modify the defition of a sequence relation
 * Modify the defition of a sequence relation
 */
void
AlterSequence(AlterSeqStmt *stmt)
AlterSequence(AlterSeqStmt * stmt)
{
	SeqTable	elm;
	Relation	seqrel;
@ -324,7 +324,7 @@ AlterSequence(AlterSeqStmt *stmt)
	page = BufferGetPage(buf);

	new.increment_by = seq->increment_by;
	new.max_value = seq->max_value;
	new.max_value = seq->max_value;
	new.min_value = seq->min_value;
	new.cache_value = seq->cache_value;
	new.is_cycled = seq->is_cycled;
@ -346,9 +346,9 @@ AlterSequence(AlterSeqStmt *stmt)
	}

	/* save info in local cache */
	elm->last = new.last_value; /* last returned number */
	elm->cached = new.last_value;	/* last cached number (forget cached
									 * values) */
	elm->last = new.last_value; /* last returned number */
	elm->cached = new.last_value;	/* last cached number (forget
									 * cached values) */

	START_CRIT_SECTION();

@ -494,9 +494,9 @@ nextval(PG_FUNCTION_ARGS)

				snprintf(buf, sizeof(buf), INT64_FORMAT, maxv);
				ereport(ERROR,
						(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
						 errmsg("%s.nextval: reached MAXVALUE (%s)",
								sequence->relname, buf)));
					(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
					 errmsg("%s.nextval: reached MAXVALUE (%s)",
							sequence->relname, buf)));
			}
			next = minv;
		}
@ -517,9 +517,9 @@ nextval(PG_FUNCTION_ARGS)

				snprintf(buf, sizeof(buf), INT64_FORMAT, minv);
				ereport(ERROR,
						(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
						 errmsg("%s.nextval: reached MINVALUE (%s)",
								sequence->relname, buf)));
					(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
					 errmsg("%s.nextval: reached MINVALUE (%s)",
							sequence->relname, buf)));
			}
			next = maxv;
		}
@ -895,9 +895,9 @@ init_params(List *options, Form_pg_sequence new)
						 errmsg("conflicting or redundant options")));
			increment_by = defel;
		}

		/*
		 * start is for a new sequence
		 * restart is for alter
		 * start is for a new sequence restart is for alter
		 */
		else if (strcmp(defel->defname, "start") == 0 ||
				 strcmp(defel->defname, "restart") == 0)
@ -963,9 +963,9 @@ init_params(List *options, Form_pg_sequence new)
		|| (max_value != (DefElem *) NULL && !max_value->arg))
	{
		if (new->increment_by > 0)
			new->max_value = SEQ_MAXVALUE;	/* ascending seq */
			new->max_value = SEQ_MAXVALUE;		/* ascending seq */
		else
			new->max_value = -1;	/* descending seq */
			new->max_value = -1;		/* descending seq */
	}
	else if (max_value != (DefElem *) NULL)
		new->max_value = defGetInt64(max_value);
@ -975,9 +975,9 @@ init_params(List *options, Form_pg_sequence new)
		|| (min_value != (DefElem *) NULL && !min_value->arg))
	{
		if (new->increment_by > 0)
			new->min_value = 1; /* ascending seq */
			new->min_value = 1;	/* ascending seq */
		else
			new->min_value = SEQ_MINVALUE;	/* descending seq */
			new->min_value = SEQ_MINVALUE;		/* descending seq */
	}
	else if (min_value != (DefElem *) NULL)
		new->min_value = defGetInt64(min_value);
@ -996,7 +996,7 @@ init_params(List *options, Form_pg_sequence new)
	}

	/* START WITH */
	if (new->last_value == 0 && last_value == (DefElem *) NULL)
	if (new->last_value == 0 && last_value == (DefElem *) NULL)
	{
		if (new->increment_by > 0)
			new->last_value = new->min_value;	/* ascending seq */
@ -1015,8 +1015,8 @@ init_params(List *options, Form_pg_sequence new)
		snprintf(bufm, sizeof(bufm), INT64_FORMAT, new->min_value);
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
			 errmsg("START value (%s) can't be less than MINVALUE (%s)",
					bufs, bufm)));
			  errmsg("START value (%s) can't be less than MINVALUE (%s)",
					 bufs, bufm)));
	}
	if (new->last_value > new->max_value)
	{
@ -1027,8 +1027,8 @@ init_params(List *options, Form_pg_sequence new)
		snprintf(bufm, sizeof(bufm), INT64_FORMAT, new->max_value);
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
		errmsg("START value (%s) can't be greater than MAXVALUE (%s)",
			   bufs, bufm)));
		 errmsg("START value (%s) can't be greater than MAXVALUE (%s)",
				bufs, bufm)));
	}

	/* CACHE */
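
The nextval() hunks above reindent the bounds checks that implement sequence wraparound: overshooting MAXVALUE either raises an error or, with CYCLE, restarts at MINVALUE (and symmetrically for descending sequences). A rough standalone sketch of that rule follows; seq_next(), the int64 stand-ins, and the demo values are illustrative assumptions, not the backend code, though the overflow-safe comparison mirrors the form used in sequence.c.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Advance *last by incr, wrapping or failing at the bounds the way the
 * nextval() checks above do.  Returns false where the backend would
 * ereport "reached MAXVALUE"/"reached MINVALUE". */
static bool
seq_next(int64_t *last, int64_t incr, int64_t minv, int64_t maxv,
		 bool is_cycled)
{
	int64_t		next = *last;

	if (incr > 0)
	{
		/* ascending: test overshoot of maxv without overflowing */
		if ((maxv >= 0 && next > maxv - incr) ||
			(maxv < 0 && next + incr > maxv))
		{
			if (!is_cycled)
				return false;
			next = minv;		/* CYCLE: wrap to MINVALUE */
		}
		else
			next += incr;
	}
	else
	{
		/* descending: symmetric test against minv */
		if ((minv < 0 && next < minv - incr) ||
			(minv >= 0 && next + incr < minv))
		{
			if (!is_cycled)
				return false;
			next = maxv;		/* CYCLE: wrap to MAXVALUE */
		}
		else
			next += incr;
	}
	*last = next;
	return true;
}

int
main(void)
{
	int64_t		v = 9;

	/* prints 10, then wraps to 1, 2, 3 for a CYCLE sequence over [1,10] */
	while (seq_next(&v, 1, 1, 10, true))
	{
		printf("%lld\n", (long long) v);
		if (v == 3)
			break;
	}
	return 0;
}

The two-sided comparison is the point of the sketch: testing "next > maxv - incr" instead of "next + incr > maxv" when maxv is non-negative avoids signed overflow before the bound is checked.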
@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/commands/tablecmds.c,v 1.76 2003/08/01 00:15:19 tgl Exp $
 *	  $Header: /cvsroot/pgsql/src/backend/commands/tablecmds.c,v 1.77 2003/08/04 00:43:17 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -57,18 +57,19 @@
 */
typedef struct OnCommitItem
{
	Oid			relid;		/* relid of relation */
	OnCommitAction oncommit;	/* what to do at end of xact */
	Oid			relid;			/* relid of relation */
	OnCommitAction oncommit;		/* what to do at end of xact */

	/*
	 * If this entry was created during this xact, it should be deleted at
	 * xact abort.  Conversely, if this entry was deleted during this
	 * xact, it should be removed at xact commit.  We leave deleted
	 * entries in the list until commit so that we can roll back if needed.
	 * entries in the list until commit so that we can roll back if
	 * needed.
	 */
	bool		created_in_cur_xact;
	bool		deleted_in_cur_xact;
} OnCommitItem;
} OnCommitItem;

static List *on_commits = NIL;

@ -82,14 +83,14 @@ static void setRelhassubclassInRelation(Oid relationId, bool relhassubclass);
static bool needs_toast_table(Relation rel);
static void AlterTableAddCheckConstraint(Relation rel, Constraint *constr);
static void AlterTableAddForeignKeyConstraint(Relation rel,
								  FkConstraint *fkconstraint);
						  FkConstraint *fkconstraint);
static int	transformColumnNameList(Oid relId, List *colList,
						int16 *attnums, Oid *atttypids);
						int16 *attnums, Oid *atttypids);
static int	transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
						   List **attnamelist,
						   int16 *attnums, Oid *atttypids);
static Oid	transformFkeyCheckAttrs(Relation pkrel,
						int numattrs, int16 *attnums);
						   List **attnamelist,
						   int16 *attnums, Oid *atttypids);
static Oid	transformFkeyCheckAttrs(Relation pkrel,
						int numattrs, int16 *attnums);
static void validateForeignKeyConstraint(FkConstraint *fkconstraint,
							 Relation rel, Relation pkrel);
static void createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
@ -206,8 +207,8 @@ DefineRelation(CreateStmt *stmt, char relkind)
			if (strcmp(check[i].ccname, cdef->name) == 0)
				ereport(ERROR,
						(errcode(ERRCODE_DUPLICATE_OBJECT),
					errmsg("duplicate CHECK constraint name \"%s\"",
						   cdef->name)));
					 errmsg("duplicate CHECK constraint name \"%s\"",
							cdef->name)));
		}
		check[ncheck].ccname = cdef->name;
	}
@ -399,7 +400,7 @@ TruncateRelation(const RangeVar *relation)
	if (isOtherTempNamespace(RelationGetNamespace(rel)))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
			errmsg("cannot truncate temp tables of other processes")));
		  errmsg("cannot truncate temp tables of other processes")));

	/*
	 * Don't allow truncate on tables which are referenced by foreign keys
@ -435,8 +436,8 @@ TruncateRelation(const RangeVar *relation)
		heap_close(fkeyRel, AccessShareLock);

	/*
	 * Do the real work using the same technique as cluster, but
	 * without the data-copying portion
	 * Do the real work using the same technique as cluster, but without
	 * the data-copying portion
	 */
	rebuild_relation(rel, InvalidOid);

@ -570,8 +571,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
		if (!istemp && isTempNamespace(RelationGetNamespace(relation)))
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				errmsg("cannot inherit from temporary relation \"%s\"",
					   parent->relname)));
				 errmsg("cannot inherit from temporary relation \"%s\"",
						parent->relname)));

		/*
		 * We should have an UNDER permission flag for this, but for now,
@ -652,7 +653,7 @@ MergeAttributes(List *schema, List *supers, bool istemp,
							attributeName),
					 errdetail("%s versus %s",
							   TypeNameToString(def->typename),
							 format_type_be(attribute->atttypid))));
						  format_type_be(attribute->atttypid))));
				def->inhcount++;
				/* Merge of NOT NULL constraints = OR 'em together */
				def->is_not_null |= attribute->attnotnull;
@ -803,11 +804,11 @@ MergeAttributes(List *schema, List *supers, bool istemp,
					def->typename->typmod != newdef->typename->typmod)
					ereport(ERROR,
							(errcode(ERRCODE_DATATYPE_MISMATCH),
						errmsg("attribute \"%s\" has a type conflict",
							   attributeName),
					 errmsg("attribute \"%s\" has a type conflict",
							attributeName),
							 errdetail("%s versus %s",
									   TypeNameToString(def->typename),
								 TypeNameToString(newdef->typename))));
							TypeNameToString(newdef->typename))));
				/* Mark the column as locally defined */
				def->is_local = true;
				/* Merge of NOT NULL constraints = OR 'em together */
@ -1230,8 +1231,8 @@ renameatt(Oid myrelid,
						 0, 0))
		ereport(ERROR,
				(errcode(ERRCODE_DUPLICATE_COLUMN),
			errmsg("attribute \"%s\" of relation \"%s\" already exists",
				   newattname, RelationGetRelationName(targetrelation))));
		 errmsg("attribute \"%s\" of relation \"%s\" already exists",
			  newattname, RelationGetRelationName(targetrelation))));

	namestrcpy(&(attform->attname), newattname);

@ -1257,7 +1258,7 @@ renameatt(Oid myrelid,

		/*
		 * Scan through index columns to see if there's any simple index
		 * entries for this attribute.	We ignore expressional entries.
		 * entries for this attribute.  We ignore expressional entries.
		 */
		indextup = SearchSysCache(INDEXRELID,
								  ObjectIdGetDatum(indexoid),
@ -1270,6 +1271,7 @@ renameatt(Oid myrelid,
		{
			if (attnum != indexform->indkey[i])
				continue;

			/*
			 * Found one, rename it.
			 */
@ -1279,6 +1281,7 @@ renameatt(Oid myrelid,
								   0, 0);
			if (!HeapTupleIsValid(atttup))
				continue;		/* should we raise an error? */

			/*
			 * Update the (copied) attribute tuple.
			 */
@ -1366,7 +1369,7 @@ renamerel(Oid myrelid, const char *newrelname)
	reltup = SearchSysCacheCopy(RELOID,
								PointerGetDatum(myrelid),
								0, 0, 0);
	if (!HeapTupleIsValid(reltup))	/* shouldn't happen */
	if (!HeapTupleIsValid(reltup))		/* shouldn't happen */
		elog(ERROR, "cache lookup failed for relation %u", myrelid);

	if (get_relname_relid(newrelname, namespaceId) != InvalidOid)
@ -1743,7 +1746,7 @@ AlterTableAddColumn(Oid myrelid,
				ereport(ERROR,
						(errcode(ERRCODE_DATATYPE_MISMATCH),
						 errmsg("child table \"%s\" has different type for column \"%s\"",
						get_rel_name(childrelid), colDef->colname)));
					get_rel_name(childrelid), colDef->colname)));

			/*
			 * XXX if we supported NOT NULL or defaults, would need to do
@ -1782,7 +1785,7 @@ AlterTableAddColumn(Oid myrelid,
		if (find_inheritance_children(myrelid) != NIL)
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
				errmsg("attribute must be added to child tables too")));
			 errmsg("attribute must be added to child tables too")));
	}

	/*
@ -1801,14 +1804,14 @@ AlterTableAddColumn(Oid myrelid,
	if (colDef->raw_default || colDef->cooked_default)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
			 errmsg("adding columns with defaults is not implemented"),
		errhint("Add the column, then use ALTER TABLE SET DEFAULT.")));
		  errmsg("adding columns with defaults is not implemented"),
		errhint("Add the column, then use ALTER TABLE SET DEFAULT.")));

	if (colDef->is_not_null)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("adding NOT NULL columns is not implemented"),
		 errhint("Add the column, then use ALTER TABLE SET NOT NULL.")));
		errhint("Add the column, then use ALTER TABLE SET NOT NULL.")));

	pgclass = heap_openr(RelationRelationName, RowExclusiveLock);

@ -1829,8 +1832,8 @@ AlterTableAddColumn(Oid myrelid,
						 0, 0))
		ereport(ERROR,
				(errcode(ERRCODE_DUPLICATE_COLUMN),
			errmsg("attribute \"%s\" of relation \"%s\" already exists",
				   colDef->colname, RelationGetRelationName(rel))));
		 errmsg("attribute \"%s\" of relation \"%s\" already exists",
			  colDef->colname, RelationGetRelationName(rel))));

	minattnum = ((Form_pg_class) GETSTRUCT(reltup))->relnatts;
	maxatts = minattnum + 1;
@ -2014,8 +2017,8 @@ AlterTableAlterColumnDropNotNull(Oid myrelid, bool recurse,
	if (attnum == InvalidAttrNumber)
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_COLUMN),
			errmsg("attribute \"%s\" of relation \"%s\" does not exist",
				   colName, RelationGetRelationName(rel))));
		 errmsg("attribute \"%s\" of relation \"%s\" does not exist",
			  colName, RelationGetRelationName(rel))));

	/* Prevent them from altering a system attribute */
	if (attnum < 0)
@ -2057,8 +2060,8 @@ AlterTableAlterColumnDropNotNull(Oid myrelid, bool recurse,
			if (indexStruct->indkey[i] == attnum)
				ereport(ERROR,
						(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
					errmsg("attribute \"%s\" is in a primary key",
						   colName)));
					 errmsg("attribute \"%s\" is in a primary key",
							colName)));
		}
	}

@ -2158,8 +2161,8 @@ AlterTableAlterColumnSetNotNull(Oid myrelid, bool recurse,
	if (attnum == InvalidAttrNumber)
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_COLUMN),
			errmsg("attribute \"%s\" of relation \"%s\" does not exist",
				   colName, RelationGetRelationName(rel))));
		 errmsg("attribute \"%s\" of relation \"%s\" does not exist",
			  colName, RelationGetRelationName(rel))));

	/* Prevent them from altering a system attribute */
	if (attnum < 0)
@ -2286,8 +2289,8 @@ AlterTableAlterColumnDefault(Oid myrelid, bool recurse,
	if (attnum == InvalidAttrNumber)
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_COLUMN),
			errmsg("attribute \"%s\" of relation \"%s\" does not exist",
				   colName, RelationGetRelationName(rel))));
		 errmsg("attribute \"%s\" of relation \"%s\" does not exist",
			  colName, RelationGetRelationName(rel))));

	/* Prevent them from altering a system attribute */
	if (attnum < 0)
@ -2450,8 +2453,8 @@ AlterTableAlterColumnFlags(Oid myrelid, bool recurse,
	if (!HeapTupleIsValid(tuple))
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_COLUMN),
			errmsg("attribute \"%s\" of relation \"%s\" does not exist",
				   colName, RelationGetRelationName(rel))));
		 errmsg("attribute \"%s\" of relation \"%s\" does not exist",
			  colName, RelationGetRelationName(rel))));
	attrtuple = (Form_pg_attribute) GETSTRUCT(tuple);

	if (attrtuple->attnum < 0)
@ -2476,8 +2479,8 @@ AlterTableAlterColumnFlags(Oid myrelid, bool recurse,
		else
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				errmsg("column datatype %s can only have storage \"plain\"",
					   format_type_be(attrtuple->atttypid))));
			 errmsg("column datatype %s can only have storage \"plain\"",
					format_type_be(attrtuple->atttypid))));
	}

	simple_heap_update(attrelation, &tuple->t_self, tuple);
@ -2573,7 +2576,7 @@ AlterTableAlterOids(Oid myrelid, bool recurse, bool setOid)
				(errmsg("table \"%s\" is already WITHOUT OIDS",
						RelationGetRelationName(rel))));
		heap_close(class_rel, RowExclusiveLock);
		heap_close(rel, NoLock);	/* close rel, but keep lock! */
		heap_close(rel, NoLock);		/* close rel, but keep lock! */
		return;
	}

@ -2601,8 +2604,8 @@ AlterTableAlterOids(Oid myrelid, bool recurse, bool setOid)
		attrel = heap_open(RelOid_pg_attribute, RowExclusiveLock);

		/*
		 * Oids are being removed from the relation, so we need
		 * to remove the oid pg_attribute record relating.
		 * Oids are being removed from the relation, so we need to remove
		 * the oid pg_attribute record relating.
		 */
		atttup = SearchSysCache(ATTNUM,
								ObjectIdGetDatum(myrelid),
@ -2621,7 +2624,7 @@ AlterTableAlterOids(Oid myrelid, bool recurse, bool setOid)

	heap_close(class_rel, RowExclusiveLock);

	heap_close(rel, NoLock);	/* close rel, but keep lock! */
	heap_close(rel, NoLock);	/* close rel, but keep lock! */
}

/*
@ -2663,8 +2666,8 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
	if (attnum == InvalidAttrNumber)
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_COLUMN),
			errmsg("attribute \"%s\" of relation \"%s\" does not exist",
				   colName, RelationGetRelationName(rel))));
		 errmsg("attribute \"%s\" of relation \"%s\" does not exist",
			  colName, RelationGetRelationName(rel))));

	/* Can't drop a system attribute */
	/* XXX perhaps someday allow dropping OID? */
@ -2712,7 +2715,7 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
					 colName, childrelid);
			childatt = (Form_pg_attribute) GETSTRUCT(tuple);

			if (childatt->attinhcount <= 0) /* shouldn't happen */
			if (childatt->attinhcount <= 0)		/* shouldn't happen */
				elog(ERROR, "relation %u has non-inherited attribute \"%s\"",
					 childrelid, colName);
			childatt->attinhcount--;
@ -2731,9 +2734,9 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
	}

	/*
	 * Propagate to children if desired.  Unlike most other ALTER routines,
	 * we have to do this one level of recursion at a time; we can't use
	 * find_all_inheritors to do it in one pass.
	 * Propagate to children if desired.  Unlike most other ALTER
	 * routines, we have to do this one level of recursion at a time; we
	 * can't use find_all_inheritors to do it in one pass.
	 */
	if (recurse)
	{
@ -2763,7 +2766,7 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
					 colName, childrelid);
			childatt = (Form_pg_attribute) GETSTRUCT(tuple);

			if (childatt->attinhcount <= 0) /* shouldn't happen */
			if (childatt->attinhcount <= 0)		/* shouldn't happen */
				elog(ERROR, "relation %u has non-inherited attribute \"%s\"",
					 childrelid, colName);

@ -2882,18 +2885,18 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
				{
					if (ConstraintNameIsUsed(CONSTRAINT_RELATION,
											 RelationGetRelid(rel),
										   RelationGetNamespace(rel),
											 RelationGetNamespace(rel),
											 constr->name))
						ereport(ERROR,
								(errcode(ERRCODE_DUPLICATE_OBJECT),
								 errmsg("constraint \"%s\" for relation \"%s\" already exists",
										constr->name,
									  RelationGetRelationName(rel))));
										RelationGetRelationName(rel))));
				}
				else
					constr->name = GenerateConstraintName(CONSTRAINT_RELATION,
												  RelationGetRelid(rel),
											RelationGetNamespace(rel),
												  RelationGetRelid(rel),
										   RelationGetNamespace(rel),
														  &counter);

				/*
@ -2923,14 +2926,14 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
				if (fkconstraint->constr_name)
				{
					if (ConstraintNameIsUsed(CONSTRAINT_RELATION,
											 RelationGetRelid(rel),
										 RelationGetRelid(rel),
										 RelationGetNamespace(rel),
										 fkconstraint->constr_name))
						ereport(ERROR,
								(errcode(ERRCODE_DUPLICATE_OBJECT),
								 errmsg("constraint \"%s\" for relation \"%s\" already exists",
										fkconstraint->constr_name,
									  RelationGetRelationName(rel))));
										RelationGetRelationName(rel))));
				}
				else
					fkconstraint->constr_name = GenerateConstraintName(CONSTRAINT_RELATION,
@ -2959,7 +2962,7 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
/*
 * Add a check constraint to a single table
 *
 * Subroutine for AlterTableAddConstraint.	Must already hold exclusive
 * Subroutine for AlterTableAddConstraint.  Must already hold exclusive
 * lock on the rel, and have done appropriate validity/permissions checks
 * for it.
 */
@ -2979,13 +2982,13 @@ AlterTableAddCheckConstraint(Relation rel, Constraint *constr)
	Node	   *expr;

	/*
	 * We need to make a parse state and range
	 * table to allow us to do transformExpr()
	 * We need to make a parse state and range table to allow us to do
	 * transformExpr()
	 */
	pstate = make_parsestate(NULL);
	rte = addRangeTableEntryForRelation(pstate,
										RelationGetRelid(rel),
						makeAlias(RelationGetRelationName(rel), NIL),
					 makeAlias(RelationGetRelationName(rel), NIL),
										false,
										true);
	addRTEtoQuery(pstate, rte, true, true);
@ -3006,8 +3009,8 @@ AlterTableAddCheckConstraint(Relation rel, Constraint *constr)
	if (length(pstate->p_rtable) != 1)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
			errmsg("CHECK constraint may only reference relation \"%s\"",
				   RelationGetRelationName(rel))));
		 errmsg("CHECK constraint may only reference relation \"%s\"",
				RelationGetRelationName(rel))));

	/*
	 * No subplans or aggregates, either...
@ -3070,15 +3073,13 @@ AlterTableAddCheckConstraint(Relation rel, Constraint *constr)
	if (!successful)
		ereport(ERROR,
				(errcode(ERRCODE_CHECK_VIOLATION),
			errmsg("CHECK constraint \"%s\" is violated at some row(s)",
				   constr->name)));
		 errmsg("CHECK constraint \"%s\" is violated at some row(s)",
				constr->name)));

	/*
	 * Call AddRelationRawConstraints to do
	 * the real adding -- It duplicates some
	 * of the above, but does not check the
	 * validity of the constraint against
	 * tuples already in the table.
	 * Call AddRelationRawConstraints to do the real adding -- It
	 * duplicates some of the above, but does not check the validity of
	 * the constraint against tuples already in the table.
	 */
	AddRelationRawConstraints(rel, NIL, makeList1(constr));
}
@ -3086,7 +3087,7 @@ AlterTableAddCheckConstraint(Relation rel, Constraint *constr)
/*
 * Add a foreign-key constraint to a single table
 *
 * Subroutine for AlterTableAddConstraint.	Must already hold exclusive
 * Subroutine for AlterTableAddConstraint.  Must already hold exclusive
 * lock on the rel, and have done appropriate validity/permissions checks
 * for it.
 */
@ -3106,12 +3107,11 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
	Oid			constrOid;

	/*
	 * Grab an exclusive lock on the pk table, so that
	 * someone doesn't delete rows out from under us.
	 * (Although a lesser lock would do for that purpose,
	 * we'll need exclusive lock anyway to add triggers to
	 * the pk table; trying to start with a lesser lock
	 * will just create a risk of deadlock.)
	 * Grab an exclusive lock on the pk table, so that someone doesn't
	 * delete rows out from under us. (Although a lesser lock would do for
	 * that purpose, we'll need exclusive lock anyway to add triggers to
	 * the pk table; trying to start with a lesser lock will just create a
	 * risk of deadlock.)
	 */
	pkrel = heap_openrv(fkconstraint->pktable, AccessExclusiveLock);

@ -3152,8 +3152,8 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
		errmsg("cannot reference temporary table from permanent table constraint")));

	/*
	 * Look up the referencing attributes to make sure they
	 * exist, and record their attnums and type OIDs.
	 * Look up the referencing attributes to make sure they exist, and
	 * record their attnums and type OIDs.
	 */
	for (i = 0; i < INDEX_MAX_KEYS; i++)
	{
@ -3166,10 +3166,10 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
							   fkattnum, fktypoid);

	/*
	 * If the attribute list for the referenced table was omitted,
	 * lookup the definition of the primary key and use it.  Otherwise,
	 * validate the supplied attribute list.  In either case, discover
	 * the index OID and the attnums and type OIDs of the attributes.
	 * If the attribute list for the referenced table was omitted, lookup
	 * the definition of the primary key and use it.  Otherwise, validate
	 * the supplied attribute list.  In either case, discover the index
	 * OID and the attnums and type OIDs of the attributes.
	 */
	if (fkconstraint->pk_attrs == NIL)
	{
@ -3208,8 +3208,8 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
	}

	/*
	 * Check that the constraint is satisfied by existing
	 * rows (we can skip this during table creation).
	 * Check that the constraint is satisfied by existing rows (we can
	 * skip this during table creation).
	 */
	if (!fkconstraint->skip_validation)
		validateForeignKeyConstraint(fkconstraint, rel, pkrel);
@ -3225,7 +3225,8 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
									  RelationGetRelid(rel),
									  fkattnum,
									  numfks,
									  InvalidOid,		/* not a domain constraint */
									  InvalidOid,		/* not a domain
														 * constraint */
									  RelationGetRelid(pkrel),
									  pkattnum,
									  numpks,
@ -3233,7 +3234,7 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
									  fkconstraint->fk_del_action,
									  fkconstraint->fk_matchtype,
									  indexOid,
									  NULL,		/* no check constraint */
									  NULL,		/* no check constraint */
									  NULL,
									  NULL);

@ -3276,8 +3277,8 @@ transformColumnNameList(Oid relId, List *colList,
		if (attnum >= INDEX_MAX_KEYS)
			ereport(ERROR,
					(errcode(ERRCODE_TOO_MANY_COLUMNS),
				errmsg("cannot have more than %d keys in a foreign key",
					   INDEX_MAX_KEYS)));
			 errmsg("cannot have more than %d keys in a foreign key",
					INDEX_MAX_KEYS)));
		attnums[attnum] = ((Form_pg_attribute) GETSTRUCT(atttuple))->attnum;
		atttypids[attnum] = ((Form_pg_attribute) GETSTRUCT(atttuple))->atttypid;
		ReleaseSysCache(atttuple);
@ -3291,7 +3292,7 @@ transformColumnNameList(Oid relId, List *colList,
 * transformFkeyGetPrimaryKey -
 *
 *	Look up the names, attnums, and types of the primary key attributes
 *	for the pkrel.	Used when the column list in the REFERENCES specification
 *	for the pkrel.  Used when the column list in the REFERENCES specification
 *	is omitted.
 */
static int
@ -3339,12 +3340,12 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
	if (indexStruct == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
			errmsg("there is no PRIMARY KEY for referenced table \"%s\"",
				   RelationGetRelationName(pkrel))));
		 errmsg("there is no PRIMARY KEY for referenced table \"%s\"",
				RelationGetRelationName(pkrel))));

	/*
	 * Now build the list of PK attributes from the indkey definition
	 * (we assume a primary key cannot have expressional elements)
	 * Now build the list of PK attributes from the indkey definition (we
	 * assume a primary key cannot have expressional elements)
	 */
	*attnamelist = NIL;
	for (i = 0; i < indexStruct->indnatts; i++)
@ -3389,7 +3390,8 @@ transformFkeyCheckAttrs(Relation pkrel,
	{
		HeapTuple	indexTuple;
		Form_pg_index indexStruct;
		int			i, j;
		int			i,
					j;

		indexoid = lfirsto(indexoidscan);
		indexTuple = SearchSysCache(INDEXRELID,
@ -3453,7 +3455,7 @@ transformFkeyCheckAttrs(Relation pkrel,
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_FOREIGN_KEY),
				 errmsg("there is no UNIQUE constraint matching given keys for referenced table \"%s\"",
						RelationGetRelationName(pkrel))));
				RelationGetRelationName(pkrel))));

	freeList(indexoidlist);

@ -3969,17 +3971,17 @@ AlterTableOwner(Oid relationOid, int32 newOwnerSysId)
void
AlterTableClusterOn(Oid relOid, const char *indexName)
{
	Relation	rel,
				pg_index;
	List	   *index;
	Oid			indexOid;
	HeapTuple	indexTuple;
	Form_pg_index indexForm;

	Relation	rel,
				pg_index;
	List	   *index;
	Oid			indexOid;
	HeapTuple	indexTuple;
	Form_pg_index indexForm;

	rel = heap_open(relOid, AccessExclusiveLock);

	indexOid = get_relname_relid(indexName, rel->rd_rel->relnamespace);


	if (!OidIsValid(indexOid))
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
@ -3994,36 +3996,37 @@ AlterTableClusterOn(Oid relOid, const char *indexName)
	indexForm = (Form_pg_index) GETSTRUCT(indexTuple);

	/*
	 * If this is the same index the relation was previously
	 * clustered on, no need to do anything.
	 * If this is the same index the relation was previously clustered on,
	 * no need to do anything.
	 */
	if (indexForm->indisclustered)
	{
		ereport(NOTICE,
		   (errmsg("table \"%s\" is already being clustered on index \"%s\"",
				   NameStr(rel->rd_rel->relname), indexName)));
			(errmsg("table \"%s\" is already being clustered on index \"%s\"",
					NameStr(rel->rd_rel->relname), indexName)));
		ReleaseSysCache(indexTuple);
		heap_close(rel, NoLock);
		return;
	}

	pg_index = heap_openr(IndexRelationName, RowExclusiveLock);


	/*
	 * Now check each index in the relation and set the bit where needed.
	 */
	foreach (index, RelationGetIndexList(rel))
	foreach(index, RelationGetIndexList(rel))
	{
		HeapTuple	idxtuple;
		Form_pg_index idxForm;

		HeapTuple	idxtuple;
		Form_pg_index idxForm;

		indexOid = lfirsto(index);
		idxtuple = SearchSysCacheCopy(INDEXRELID,
								  ObjectIdGetDatum(indexOid),
									  ObjectIdGetDatum(indexOid),
									  0, 0, 0);
		if (!HeapTupleIsValid(idxtuple))
			elog(ERROR, "cache lookup failed for index %u", indexOid);
		idxForm = (Form_pg_index) GETSTRUCT(idxtuple);

		/*
		 * Unset the bit if set.  We know it's wrong because we checked
		 * this earlier.
@ -4100,7 +4103,7 @@ AlterTableCreateToastTable(Oid relOid, bool silent)
	if (shared_relation && IsUnderPostmaster)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
			errmsg("shared relations cannot be toasted after initdb")));
		errmsg("shared relations cannot be toasted after initdb")));

	/*
	 * Is it already toasted?
@ -4331,12 +4334,12 @@ needs_toast_table(Relation rel)
void
register_on_commit_action(Oid relid, OnCommitAction action)
{
	OnCommitItem *oc;
	OnCommitItem *oc;
	MemoryContext oldcxt;

	/*
	 * We needn't bother registering the relation unless there is an ON COMMIT
	 * action we need to take.
	 * We needn't bother registering the relation unless there is an ON
	 * COMMIT action we need to take.
	 */
	if (action == ONCOMMIT_NOOP || action == ONCOMMIT_PRESERVE_ROWS)
		return;
@ -4366,7 +4369,7 @@ remove_on_commit_action(Oid relid)

	foreach(l, on_commits)
	{
		OnCommitItem *oc = (OnCommitItem *) lfirst(l);
		OnCommitItem *oc = (OnCommitItem *) lfirst(l);

		if (oc->relid == relid)
		{
@ -4389,7 +4392,7 @@ PreCommit_on_commit_actions(void)

	foreach(l, on_commits)
	{
		OnCommitItem *oc = (OnCommitItem *) lfirst(l);
		OnCommitItem *oc = (OnCommitItem *) lfirst(l);

		/* Ignore entry if already dropped in this xact */
		if (oc->deleted_in_cur_xact)
@ -4403,23 +4406,25 @@ PreCommit_on_commit_actions(void)
				break;
			case ONCOMMIT_DELETE_ROWS:
				heap_truncate(oc->relid);
				CommandCounterIncrement();	/* XXX needed? */
				CommandCounterIncrement();		/* XXX needed? */
				break;
			case ONCOMMIT_DROP:
			{
				ObjectAddress object;
				{
					ObjectAddress object;

				object.classId = RelOid_pg_class;
				object.objectId = oc->relid;
				object.objectSubId = 0;
				performDeletion(&object, DROP_CASCADE);
				/*
				 * Note that table deletion will call remove_on_commit_action,
				 * so the entry should get marked as deleted.
				 */
				Assert(oc->deleted_in_cur_xact);
				break;
			}
					object.classId = RelOid_pg_class;
					object.objectId = oc->relid;
					object.objectSubId = 0;
					performDeletion(&object, DROP_CASCADE);

					/*
					 * Note that table deletion will call
					 * remove_on_commit_action, so the entry should get
					 * marked as deleted.
					 */
					Assert(oc->deleted_in_cur_xact);
					break;
				}
		}
	}
}
@ -4442,7 +4447,7 @@ AtEOXact_on_commit_actions(bool isCommit)
	l = on_commits;
	while (l != NIL)
	{
		OnCommitItem *oc = (OnCommitItem *) lfirst(l);
		OnCommitItem *oc = (OnCommitItem *) lfirst(l);

		if (isCommit ? oc->deleted_in_cur_xact :
			oc->created_in_cur_xact)
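
The tablecmds.c hunks above reindent the ON COMMIT bookkeeping: register_on_commit_action() remembers which temp tables need action, and PreCommit_on_commit_actions() walks that list at commit. A simplified sketch of that bookkeeping follows; it uses plain malloc and printf in place of memory contexts and heap_truncate()/performDeletion(), and the names mirror the real ones only for readability.

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int Oid;

typedef enum
{
	ONCOMMIT_NOOP, ONCOMMIT_PRESERVE_ROWS,
	ONCOMMIT_DELETE_ROWS, ONCOMMIT_DROP
} OnCommitAction;

/* Same shape as OnCommitItem above, minus the per-xact flags. */
typedef struct OnCommitItem
{
	Oid			relid;
	OnCommitAction oncommit;
	struct OnCommitItem *next;
} OnCommitItem;

static OnCommitItem *on_commits = NULL;

static void
register_on_commit_action(Oid relid, OnCommitAction action)
{
	OnCommitItem *oc;

	/* As in the real function: no-op actions need no entry at all. */
	if (action == ONCOMMIT_NOOP || action == ONCOMMIT_PRESERVE_ROWS)
		return;

	oc = malloc(sizeof(OnCommitItem));
	oc->relid = relid;
	oc->oncommit = action;
	oc->next = on_commits;
	on_commits = oc;
}

static void
pre_commit_on_commit_actions(void)
{
	for (OnCommitItem *oc = on_commits; oc != NULL; oc = oc->next)
	{
		switch (oc->oncommit)
		{
			case ONCOMMIT_DELETE_ROWS:
				printf("would TRUNCATE relation %u\n", oc->relid);
				break;
			case ONCOMMIT_DROP:
				printf("would DROP relation %u\n", oc->relid);
				break;
			default:
				break;
		}
	}
}

int
main(void)
{
	register_on_commit_action(16384, ONCOMMIT_DELETE_ROWS);
	register_on_commit_action(16385, ONCOMMIT_DROP);
	pre_commit_on_commit_actions();
	return 0;
}

The design point the real code's comment makes survives in the sketch: entries are kept in the list (only flagged) until end of transaction, so an abort can simply discard the transaction's additions and deletions.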
@ -7,7 +7,7 @@
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.153 2003/08/01 00:15:19 tgl Exp $
 *	  $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.154 2003/08/04 00:43:17 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -41,17 +41,17 @@

static void InsertTrigger(TriggerDesc *trigdesc, Trigger *trigger, int indx);
static HeapTuple GetTupleForTrigger(EState *estate,
					ResultRelInfo *relinfo,
					ItemPointer tid,
					CommandId cid,
					TupleTableSlot **newSlot);
				   ResultRelInfo *relinfo,
				   ItemPointer tid,
				   CommandId cid,
				   TupleTableSlot **newSlot);
static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
					FmgrInfo *finfo,
					MemoryContext per_tuple_context);
static void DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event,
						 bool row_trigger, HeapTuple oldtup, HeapTuple newtup);
			 bool row_trigger, HeapTuple oldtup, HeapTuple newtup);
static void DeferredTriggerExecute(DeferredTriggerEvent event, int itemno,
					   Relation rel, TriggerDesc *trigdesc, FmgrInfo *finfo,
				   Relation rel, TriggerDesc *trigdesc, FmgrInfo *finfo,
					   MemoryContext per_tuple_context);


@ -97,18 +97,19 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
	else if (stmt->isconstraint)
	{
		/*
		 * If this trigger is a constraint (and a foreign key one)
		 * then we really need a constrrelid.  Since we don't have one,
		 * we'll try to generate one from the argument information.
		 * If this trigger is a constraint (and a foreign key one) then we
		 * really need a constrrelid.  Since we don't have one, we'll try
		 * to generate one from the argument information.
		 *
		 * This is really just a workaround for a long-ago pg_dump bug
		 * that omitted the FROM clause in dumped CREATE CONSTRAINT TRIGGER
		 * commands.  We don't want to bomb out completely here if we can't
		 * determine the correct relation, because that would prevent loading
		 * the dump file.  Instead, NOTICE here and ERROR in the trigger.
		 * This is really just a workaround for a long-ago pg_dump bug that
		 * omitted the FROM clause in dumped CREATE CONSTRAINT TRIGGER
		 * commands.  We don't want to bomb out completely here if we
		 * can't determine the correct relation, because that would
		 * prevent loading the dump file.  Instead, NOTICE here and ERROR
		 * in the trigger.
		 */
		bool		needconstrrelid = false;
		void	   *elem = NULL;
		bool		needconstrrelid = false;
		void	   *elem = NULL;

		if (strncmp(strVal(llast(stmt->funcname)), "RI_FKey_check_", 14) == 0)
		{
@ -265,8 +266,8 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
		if (namestrcmp(&(pg_trigger->tgname), trigname) == 0)
			ereport(ERROR,
					(errcode(ERRCODE_DUPLICATE_OBJECT),
				errmsg("trigger \"%s\" for relation \"%s\" already exists",
					   trigname, stmt->relation->relname)));
			  errmsg("trigger \"%s\" for relation \"%s\" already exists",
					 trigname, stmt->relation->relname)));
		found++;
	}
	systable_endscan(tgscan);
@ -280,7 +281,7 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
	if (funcrettype != TRIGGEROID)
	{
		/*
		 * We allow OPAQUE just so we can load old dump files.	When we
		 * We allow OPAQUE just so we can load old dump files.  When we
		 * see a trigger function declared OPAQUE, change it to TRIGGER.
		 */
		if (funcrettype == OPAQUEOID)
@ -480,8 +481,8 @@ DropTrigger(Oid relid, const char *trigname, DropBehavior behavior)
	if (!HeapTupleIsValid(tup))
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
			errmsg("trigger \"%s\" for relation \"%s\" does not exist",
				   trigname, get_rel_name(relid))));
			  errmsg("trigger \"%s\" for relation \"%s\" does not exist",
					 trigname, get_rel_name(relid))));

	if (!pg_class_ownercheck(relid, GetUserId()))
		aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
@ -576,7 +577,7 @@ RemoveTriggerById(Oid trigOid)
		elog(ERROR, "cache lookup failed for relation %u", relid);
	classForm = (Form_pg_class) GETSTRUCT(tuple);

	if (classForm->reltriggers == 0)	/* should not happen */
	if (classForm->reltriggers == 0)	/* should not happen */
		elog(ERROR, "relation \"%s\" has reltriggers = 0",
			 RelationGetRelationName(rel));
	classForm->reltriggers--;
@ -650,8 +651,8 @@ renametrig(Oid relid,
	if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
		ereport(ERROR,
				(errcode(ERRCODE_DUPLICATE_OBJECT),
			errmsg("trigger \"%s\" for relation \"%s\" already exists",
				   newname, RelationGetRelationName(targetrel))));
			  errmsg("trigger \"%s\" for relation \"%s\" already exists",
					 newname, RelationGetRelationName(targetrel))));
	systable_endscan(tgscan);

	/*
@ -693,8 +694,8 @@ renametrig(Oid relid,
	{
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
			errmsg("trigger \"%s\" for relation \"%s\" does not exist",
				   oldname, RelationGetRelationName(targetrel))));
			  errmsg("trigger \"%s\" for relation \"%s\" does not exist",
					 oldname, RelationGetRelationName(targetrel))));
	}

	systable_endscan(tgscan);
@ -762,7 +763,7 @@ RelationBuildTriggers(Relation relation)

		build->tgoid = HeapTupleGetOid(htup);
		build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
								NameGetDatum(&pg_trigger->tgname)));
									NameGetDatum(&pg_trigger->tgname)));
		build->tgfoid = pg_trigger->tgfoid;
		build->tgtype = pg_trigger->tgtype;
		build->tgenabled = pg_trigger->tgenabled;
@ -927,8 +928,8 @@ CopyTriggerDesc(TriggerDesc *trigdesc)
		trigger->tgname = pstrdup(trigger->tgname);
		if (trigger->tgnargs > 0)
		{
			char	  **newargs;
			int16		j;
			char	  **newargs;
			int16		j;

			newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
			for (j = 0; j < trigger->tgnargs; j++)
@ -1101,7 +1102,7 @@ equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
			return false;
	return true;
}
#endif   /* NOT_USED */
#endif   /* NOT_USED */

/*
 * Call a trigger function.
@ -1166,10 +1167,10 @@ ExecCallTriggerFunc(TriggerData *trigdata,
void
ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
{
	TriggerDesc *trigdesc;
	int			ntrigs;
	int		   *tgindx;
	int			i;
	TriggerDesc *trigdesc;
	int			ntrigs;
	int		   *tgindx;
	int			i;
	TriggerData LocTriggerData;

	trigdesc = relinfo->ri_TrigDesc;
@ -1190,10 +1191,10 @@ ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)

	LocTriggerData.type = T_TriggerData;
	LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
		TRIGGER_EVENT_BEFORE;
	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
	LocTriggerData.tg_newtuple = NULL;
	LocTriggerData.tg_trigtuple = NULL;
		TRIGGER_EVENT_BEFORE;
	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
	LocTriggerData.tg_newtuple = NULL;
	LocTriggerData.tg_trigtuple = NULL;
	for (i = 0; i < ntrigs; i++)
	{
		Trigger    *trigger = &trigdesc->triggers[tgindx[i]];
@ -1209,7 +1210,7 @@ ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
		if (newtuple)
			ereport(ERROR,
				  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
				errmsg("BEFORE STATEMENT trigger cannot return a value")));
			 errmsg("BEFORE STATEMENT trigger cannot return a value")));
	}
}

@ -1242,8 +1243,8 @@ ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,

	LocTriggerData.type = T_TriggerData;
	LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
		TRIGGER_EVENT_ROW |
		TRIGGER_EVENT_BEFORE;
		TRIGGER_EVENT_ROW |
		TRIGGER_EVENT_BEFORE;
	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
	LocTriggerData.tg_newtuple = NULL;
	for (i = 0; i < ntrigs; i++)
@ -1279,10 +1280,10 @@ ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
void
ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
{
	TriggerDesc *trigdesc;
	int			ntrigs;
	int		   *tgindx;
	int			i;
	TriggerDesc *trigdesc;
	int			ntrigs;
	int		   *tgindx;
	int			i;
	TriggerData LocTriggerData;

	trigdesc = relinfo->ri_TrigDesc;
@ -1303,10 +1304,10 @@ ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)

	LocTriggerData.type = T_TriggerData;
	LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
		TRIGGER_EVENT_BEFORE;
	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
	LocTriggerData.tg_newtuple = NULL;
	LocTriggerData.tg_trigtuple = NULL;
		TRIGGER_EVENT_BEFORE;
	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
	LocTriggerData.tg_newtuple = NULL;
	LocTriggerData.tg_trigtuple = NULL;
	for (i = 0; i < ntrigs; i++)
	{
		Trigger    *trigger = &trigdesc->triggers[tgindx[i]];
@ -1322,7 +1323,7 @@ ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
		if (newtuple)
			ereport(ERROR,
				  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
				errmsg("BEFORE STATEMENT trigger cannot return a value")));
			 errmsg("BEFORE STATEMENT trigger cannot return a value")));
	}
}

@ -1361,8 +1362,8 @@ ExecBRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,

	LocTriggerData.type = T_TriggerData;
	LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
		TRIGGER_EVENT_ROW |
		TRIGGER_EVENT_BEFORE;
		TRIGGER_EVENT_ROW |
		TRIGGER_EVENT_BEFORE;
	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
	LocTriggerData.tg_newtuple = NULL;
	for (i = 0; i < ntrigs; i++)
@ -1408,10 +1409,10 @@ ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
void
ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
{
	TriggerDesc *trigdesc;
	int			ntrigs;
	int		   *tgindx;
	int			i;
	TriggerDesc *trigdesc;
	int			ntrigs;
	int		   *tgindx;
	int			i;
	TriggerData LocTriggerData;

	trigdesc = relinfo->ri_TrigDesc;
@ -1432,10 +1433,10 @@ ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)

	LocTriggerData.type = T_TriggerData;
	LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
		TRIGGER_EVENT_BEFORE;
	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
	LocTriggerData.tg_newtuple = NULL;
	LocTriggerData.tg_trigtuple = NULL;
		TRIGGER_EVENT_BEFORE;
	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
	LocTriggerData.tg_newtuple = NULL;
	LocTriggerData.tg_trigtuple = NULL;
	for (i = 0; i < ntrigs; i++)
	{
		Trigger    *trigger = &trigdesc->triggers[tgindx[i]];
@ -1451,7 +1452,7 @@ ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
		if (newtuple)
			ereport(ERROR,
				  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
				errmsg("BEFORE STATEMENT trigger cannot return a value")));
			 errmsg("BEFORE STATEMENT trigger cannot return a value")));
	}
}

@ -1498,8 +1499,8 @@ ExecBRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,

	LocTriggerData.type = T_TriggerData;
	LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
		TRIGGER_EVENT_ROW |
		TRIGGER_EVENT_BEFORE;
		TRIGGER_EVENT_ROW |
		TRIGGER_EVENT_BEFORE;
	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
	for (i = 0; i < ntrigs; i++)
	{
@ -1639,19 +1640,20 @@ ltrmark:;
 * ----------
 */

typedef struct DeferredTriggersData {
	/* Internal data is held in a per-transaction memory context */
	MemoryContext deftrig_cxt;
	/* ALL DEFERRED or ALL IMMEDIATE */
	bool		deftrig_all_isset;
	bool		deftrig_all_isdeferred;
	/* Per trigger state */
	List	   *deftrig_trigstates;
	/* List of pending deferred triggers. Previous comment below */
	DeferredTriggerEvent deftrig_events;
	DeferredTriggerEvent deftrig_events_imm;
	DeferredTriggerEvent deftrig_event_tail;
} DeferredTriggersData;
typedef struct DeferredTriggersData
{
	/* Internal data is held in a per-transaction memory context */
	MemoryContext deftrig_cxt;
	/* ALL DEFERRED or ALL IMMEDIATE */
	bool		deftrig_all_isset;
	bool		deftrig_all_isdeferred;
	/* Per trigger state */
	List	   *deftrig_trigstates;
	/* List of pending deferred triggers. Previous comment below */
	DeferredTriggerEvent deftrig_events;
	DeferredTriggerEvent deftrig_events_imm;
	DeferredTriggerEvent deftrig_event_tail;
} DeferredTriggersData;

/* ----------
 * deftrig_events, deftrig_event_tail:
@ -1661,8 +1663,8 @@ typedef struct DeferredTriggersData {
 * Because this can grow pretty large, we don't use separate List nodes,
 * but instead thread the list through the dte_next fields of the member
 * nodes.  Saves just a few bytes per entry, but that adds up.
 *
 * deftrig_events_imm holds the tail pointer as of the last
 *
 * deftrig_events_imm holds the tail pointer as of the last
 * deferredTriggerInvokeEvents call; we can use this to avoid rescanning
 * entries unnecessarily.  It is NULL if deferredTriggerInvokeEvents
 * hasn't run since the last state change.
@ -1674,7 +1676,7 @@ typedef struct DeferredTriggersData {

typedef DeferredTriggersData *DeferredTriggers;

static DeferredTriggers deferredTriggers;
static DeferredTriggers deferredTriggers;

/* ----------
 * deferredTriggerCheckState()
@ -1783,7 +1785,7 @@ deferredTriggerAddEvent(DeferredTriggerEvent event)
 */
static void
DeferredTriggerExecute(DeferredTriggerEvent event, int itemno,
					   Relation rel, TriggerDesc *trigdesc, FmgrInfo *finfo,
				   Relation rel, TriggerDesc *trigdesc, FmgrInfo *finfo,
					   MemoryContext per_tuple_context)
{
	Oid			tgoid = event->dte_item[itemno].dti_tgoid;
@ -1817,7 +1819,7 @@ DeferredTriggerExecute(DeferredTriggerEvent event, int itemno,
	 */
	LocTriggerData.type = T_TriggerData;
	LocTriggerData.tg_event = (event->dte_event & TRIGGER_EVENT_OPMASK) |
		(event->dte_event & TRIGGER_EVENT_ROW);
		(event->dte_event & TRIGGER_EVENT_ROW);
	LocTriggerData.tg_relation = rel;

	LocTriggerData.tg_trigger = NULL;
@ -1899,12 +1901,12 @@ deferredTriggerInvokeEvents(bool immediate_only)
	 * are going to discard the whole event queue on return anyway, so no
	 * need to bother with "retail" pfree's.
	 *
	 * If immediate_only is true, we need only scan from where the end of
	 * the queue was at the previous deferredTriggerInvokeEvents call;
	 * any non-deferred events before that point are already fired.
	 * (But if the deferral state changes, we must reset the saved position
	 * to the beginning of the queue, so as to process all events once with
	 * the new states.  See DeferredTriggerSetState.)
	 * If immediate_only is true, we need only scan from where the end of the
	 * queue was at the previous deferredTriggerInvokeEvents call; any
	 * non-deferred events before that point are already fired.  (But if
	 * the deferral state changes, we must reset the saved position to the
	 * beginning of the queue, so as to process all events once with the
	 * new states.  See DeferredTriggerSetState.)
	 */

	/* Make a per-tuple memory context for trigger function calls */
@ -1916,9 +1918,9 @@ deferredTriggerInvokeEvents(bool immediate_only)
										 ALLOCSET_DEFAULT_MAXSIZE);

	/*
	 * If immediate_only is true, then the only events that could need firing
	 * are those since deftrig_events_imm.  (But if deftrig_events_imm is
	 * NULL, we must scan the entire list.)
	 * If immediate_only is true, then the only events that could need
	 * firing are those since deftrig_events_imm.  (But if
	 * deftrig_events_imm is NULL, we must scan the entire list.)
	 */
	if (immediate_only && deferredTriggers->deftrig_events_imm != NULL)
	{
@ -1984,17 +1986,18 @@ deferredTriggerInvokeEvents(bool immediate_only)
				rel = heap_open(event->dte_relid, NoLock);

				/*
				 * Copy relation's trigger info so that we have a stable
				 * copy no matter what the called triggers do.
				 * Copy relation's trigger info so that we have a
				 * stable copy no matter what the called triggers do.
				 */
				trigdesc = CopyTriggerDesc(rel->trigdesc);

				if (trigdesc == NULL)	/* should not happen */
				if (trigdesc == NULL)	/* should not happen */
					elog(ERROR, "relation %u has no triggers",
						 event->dte_relid);

				/*
				 * Allocate space to cache fmgr lookup info for triggers.
				 * Allocate space to cache fmgr lookup info for
				 * triggers.
				 */
				finfo = (FmgrInfo *)
					palloc0(trigdesc->numtriggers * sizeof(FmgrInfo));
@ -2089,21 +2092,23 @@ void
DeferredTriggerBeginXact(void)
{
	/*
	 * This will be changed to a special context when
	 * the nested transactions project moves forward.
	 * This will be changed to a special context when the nested
	 * transactions project moves forward.
	 */
	MemoryContext cxt = TopTransactionContext;

	deferredTriggers = (DeferredTriggers) MemoryContextAlloc(TopTransactionContext,
										   sizeof(DeferredTriggersData));
									   sizeof(DeferredTriggersData));

	/*
	 * Create the per transaction memory context
	 */
	deferredTriggers->deftrig_cxt = AllocSetContextCreate(cxt,
										   "DeferredTriggerXact",
										   ALLOCSET_DEFAULT_MINSIZE,
										   ALLOCSET_DEFAULT_INITSIZE,
										   ALLOCSET_DEFAULT_MAXSIZE);
										 "DeferredTriggerXact",
										 ALLOCSET_DEFAULT_MINSIZE,
										 ALLOCSET_DEFAULT_INITSIZE,
										 ALLOCSET_DEFAULT_MAXSIZE);

	/*
	 * If unspecified, constraints default to IMMEDIATE, per SQL
	 */
@ -2174,7 +2179,7 @@ DeferredTriggerAbortXact(void)
	 * Ignore call if we aren't in a transaction.
	 */
	if (deferredTriggers == NULL)
		return;
		return;

	/*
	 * Forget everything we know about deferred triggers.
@ -2255,7 +2260,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
			if (strlen(cname) == 0)
				ereport(ERROR,
						(errcode(ERRCODE_INVALID_NAME),
				errmsg("unnamed constraints cannot be set explicitly")));
				 errmsg("unnamed constraints cannot be set explicitly")));

			/*
			 * Setup to scan pg_trigger by tgconstrname ...
@ -2304,7 +2309,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
			if (!found)
				ereport(ERROR,
						(errcode(ERRCODE_UNDEFINED_OBJECT),
					errmsg("constraint \"%s\" does not exist", cname)));
					 errmsg("constraint \"%s\" does not exist", cname)));
		}
		heap_close(tgrel, AccessShareLock);

@ -2349,9 +2354,10 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
	 * CONSTRAINTS command applies retroactively.  This happens "for free"
	 * since we have already made the necessary modifications to the
	 * constraints, and deferredTriggerEndQuery() is called by
	 * finish_xact_command().  But we must reset deferredTriggerInvokeEvents'
	 * tail pointer to make it rescan the entire list, in case some deferred
	 * events are now immediately invokable.
	 * finish_xact_command().  But we must reset
	 * deferredTriggerInvokeEvents' tail pointer to make it rescan the
	 * entire list, in case some deferred events are now immediately
	 * invokable.
	 */
	deferredTriggers->deftrig_events_imm = NULL;
}
@ -2416,7 +2422,7 @@ DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger,
	 */
	for (i = 0; i < ntriggers; i++)
	{
		Trigger    *trigger = &trigdesc->triggers[tgindx[i]];
		Trigger    *trigger = &trigdesc->triggers[tgindx[i]];

		if (trigger->tgenabled)
			n_enabled_triggers++;
@ -2455,7 +2461,7 @@ DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger,

		ev_item = &(new_event->dte_item[i]);
		ev_item->dti_tgoid = trigger->tgoid;
		ev_item->dti_state =
		ev_item->dti_state =
			((trigger->tgdeferrable) ?
			 TRIGGER_DEFERRED_DEFERRABLE : 0) |
			((trigger->tginitdeferred) ?
@ -2464,9 +2470,7 @@ DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger,
		if (row_trigger && (trigdesc->n_before_row[event] > 0))
			ev_item->dti_state |= TRIGGER_DEFERRED_HAS_BEFORE;
		else if (!row_trigger && (trigdesc->n_before_statement[event] > 0))
		{
			ev_item->dti_state |= TRIGGER_DEFERRED_HAS_BEFORE;
		}
	}

	MemoryContextSwitchTo(oldcxt);
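
The trigger.c hunks above rewrap the comment describing the deferred-trigger queue: to save List-cell overhead, pending events are threaded through the dte_next field of each event node, with a separate tail pointer for O(1) append. A minimal sketch of that intrusive-list idea follows; the struct contents and the demo payload are assumptions chosen to keep it self-contained, not the real DeferredTriggerEventData.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for DeferredTriggerEventData: the queue link lives inside
 * the node itself rather than in separate list cells. */
typedef struct DeferredTriggerEventData
{
	int			dte_event;		/* payload, stands in for the real fields */
	struct DeferredTriggerEventData *dte_next;
} DeferredTriggerEventData, *DeferredTriggerEvent;

static DeferredTriggerEvent deftrig_events = NULL;		/* queue head */
static DeferredTriggerEvent deftrig_event_tail = NULL;	/* queue tail */

/* Append in O(1) via the tail pointer, as deferredTriggerAddEvent does. */
static void
add_event(DeferredTriggerEvent ev)
{
	ev->dte_next = NULL;
	if (deftrig_event_tail == NULL)
		deftrig_events = ev;
	else
		deftrig_event_tail->dte_next = ev;
	deftrig_event_tail = ev;
}

int
main(void)
{
	for (int i = 0; i < 3; i++)
	{
		DeferredTriggerEvent ev = malloc(sizeof(*ev));

		ev->dte_event = i;
		add_event(ev);
	}
	for (DeferredTriggerEvent ev = deftrig_events; ev; ev = ev->dte_next)
		printf("event %d\n", ev->dte_event);
	return 0;
}

A saved copy of the tail pointer (deftrig_events_imm in the real code) then lets a later scan resume just past the entries it has already fired, which is exactly the optimization the rewrapped comment describes.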
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/typecmds.c,v 1.40 2003/08/01 00:15:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/typecmds.c,v 1.41 2003/08/04 00:43:17 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@ -66,11 +66,11 @@
/* result structure for get_rels_with_domain() */
typedef struct
{
Relation rel; /* opened and locked relation */
int natts; /* number of attributes of interest */
int *atts; /* attribute numbers */
Relation rel; /* opened and locked relation */
int natts; /* number of attributes of interest */
int *atts; /* attribute numbers */
/* atts[] is of allocated length RelationGetNumberOfAttributes(rel) */
} RelToCheck;
} RelToCheck;

static Oid findTypeInputFunction(List *procname, Oid typeOid);
@ -80,9 +80,9 @@ static Oid findTypeSendFunction(List *procname, Oid typeOid);
static List *get_rels_with_domain(Oid domainOid, LOCKMODE lockmode);
static void domainOwnerCheck(HeapTuple tup, TypeName *typename);
static char *domainAddConstraint(Oid domainOid, Oid domainNamespace,
Oid baseTypeOid,
int typMod, Constraint *constr,
int *counter, char *domainName);
Oid baseTypeOid,
int typMod, Constraint *constr,
int *counter, char *domainName);

/*
@ -105,7 +105,7 @@ DefineType(List *names, List *parameters)
bool byValue = false;
char delimiter = DEFAULT_TYPDELIM;
char alignment = 'i'; /* default alignment */
char storage = 'p'; /* default TOAST storage method */
char storage = 'p'; /* default TOAST storage method */
Oid inputOid;
Oid outputOid;
Oid receiveOid = InvalidOid;
@ -237,8 +237,8 @@ DefineType(List *names, List *parameters)

/*
* Look to see if type already exists (presumably as a shell; if not,
* TypeCreate will complain). If it doesn't, create it as a shell,
* so that the OID is known for use in the I/O function definitions.
* TypeCreate will complain). If it doesn't, create it as a shell, so
* that the OID is known for use in the I/O function definitions.
*/
typoid = GetSysCacheOid(TYPENAMENSP,
CStringGetDatum(typeName),
@ -492,7 +492,7 @@ DefineDomain(CreateDomainStmt *stmt)
List *listptr;
Oid basetypeoid;
Oid domainoid;
Form_pg_type baseType;
Form_pg_type baseType;
int counter = 0;

/* Convert list of names to a name and namespace */
@ -508,10 +508,11 @@ DefineDomain(CreateDomainStmt *stmt)

/*
* Domainnames, unlike typenames don't need to account for the '_'
* prefix. So they can be one character longer. (This test is presently
* useless since the parser will have truncated the name to fit. But
* leave it here since we may someday support arrays of domains, in
* which case we'll be back to needing to enforce NAMEDATALEN-2.)
* prefix. So they can be one character longer. (This test is
* presently useless since the parser will have truncated the name to
* fit. But leave it here since we may someday support arrays of
* domains, in which case we'll be back to needing to enforce
* NAMEDATALEN-2.)
*/
if (strlen(domainName) > (NAMEDATALEN - 1))
ereport(ERROR,
@ -581,8 +582,8 @@ DefineDomain(CreateDomainStmt *stmt)
basetypelem = baseType->typelem;

/*
* Run through constraints manually to avoid the additional
* processing conducted by DefineRelation() and friends.
* Run through constraints manually to avoid the additional processing
* conducted by DefineRelation() and friends.
*/
foreach(listptr, schema)
{
@ -594,7 +595,7 @@ DefineDomain(CreateDomainStmt *stmt)
if (IsA(newConstraint, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("FOREIGN KEY constraints not supported for domains")));
errmsg("FOREIGN KEY constraints not supported for domains")));

/* otherwise it should be a plain Constraint */
if (!IsA(newConstraint, Constraint))
@ -606,6 +607,7 @@ DefineDomain(CreateDomainStmt *stmt)
switch (constr->contype)
{
case CONSTR_DEFAULT:

/*
* The inherited default value may be overridden by the
* user with the DEFAULT <expr> statement.
@ -643,7 +645,7 @@ DefineDomain(CreateDomainStmt *stmt)
if (nullDefined && !typNotNull)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("conflicting NULL/NOT NULL constraints")));
errmsg("conflicting NULL/NOT NULL constraints")));
typNotNull = true;
nullDefined = true;
break;
@ -652,41 +654,42 @@ DefineDomain(CreateDomainStmt *stmt)
if (nullDefined && typNotNull)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("conflicting NULL/NOT NULL constraints")));
errmsg("conflicting NULL/NOT NULL constraints")));
typNotNull = false;
nullDefined = true;
break;
break;

case CONSTR_CHECK:

case CONSTR_CHECK:
/*
* Check constraints are handled after domain creation, as they
* require the Oid of the domain
* Check constraints are handled after domain creation, as
* they require the Oid of the domain
*/
break;
break;

/*
* All else are error cases
*/
case CONSTR_UNIQUE:
ereport(ERROR,
case CONSTR_UNIQUE:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("UNIQUE constraints not supported for domains")));
break;
errmsg("UNIQUE constraints not supported for domains")));
break;

case CONSTR_PRIMARY:
ereport(ERROR,
case CONSTR_PRIMARY:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("PRIMARY KEY constraints not supported for domains")));
break;
break;

case CONSTR_ATTR_DEFERRABLE:
case CONSTR_ATTR_NOT_DEFERRABLE:
case CONSTR_ATTR_DEFERRED:
case CONSTR_ATTR_IMMEDIATE:
ereport(ERROR,
case CONSTR_ATTR_DEFERRABLE:
case CONSTR_ATTR_NOT_DEFERRABLE:
case CONSTR_ATTR_DEFERRED:
case CONSTR_ATTR_IMMEDIATE:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("deferrability constraints not supported for domains")));
break;
break;

default:
elog(ERROR, "unrecognized constraint subtype: %d",
@ -715,15 +718,16 @@ DefineDomain(CreateDomainStmt *stmt)
basetypeoid, /* base type ID */
defaultValue, /* default type value (text) */
defaultValueBin, /* default type value (binary) */
byValue, /* passed by value */
alignment, /* required alignment */
storage, /* TOAST strategy */
stmt->typename->typmod, /* typeMod value */
typNDims, /* Array dimensions for base type */
typNotNull); /* Type NOT NULL */
byValue, /* passed by value */
alignment, /* required alignment */
storage, /* TOAST strategy */
stmt->typename->typmod, /* typeMod value */
typNDims, /* Array dimensions for base type */
typNotNull); /* Type NOT NULL */

/*
* Process constraints which refer to the domain ID returned by TypeCreate
* Process constraints which refer to the domain ID returned by
* TypeCreate
*/
foreach(listptr, schema)
{
@ -733,16 +737,16 @@ DefineDomain(CreateDomainStmt *stmt)

switch (constr->contype)
{
case CONSTR_CHECK:
case CONSTR_CHECK:
domainAddConstraint(domainoid, domainNamespace,
basetypeoid, stmt->typename->typmod,
constr, &counter, domainName);
break;
break;

/* Other constraint types were fully processed above */
/* Other constraint types were fully processed above */

default:
break;
break;
}
}

@ -834,8 +838,8 @@ findTypeInputFunction(List *procname, Oid typeOid)
* Input functions can take a single argument of type CSTRING, or
* three arguments (string, element OID, typmod).
*
* For backwards compatibility we allow OPAQUE in place of CSTRING;
* if we see this, we issue a NOTICE and fix up the pg_proc entry.
* For backwards compatibility we allow OPAQUE in place of CSTRING; if we
* see this, we issue a NOTICE and fix up the pg_proc entry.
*/
MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid));

@ -874,9 +878,10 @@ findTypeInputFunction(List *procname, Oid typeOid)
(errmsg("changing argument type of function %s from OPAQUE to CSTRING",
NameListToString(procname))));
SetFunctionArgType(procOid, 0, CSTRINGOID);

/*
* Need CommandCounterIncrement since DefineType will likely
* try to alter the pg_proc tuple again.
* Need CommandCounterIncrement since DefineType will likely try
* to alter the pg_proc tuple again.
*/
CommandCounterIncrement();

@ -905,8 +910,8 @@ findTypeOutputFunction(List *procname, Oid typeOid)
* arguments (data value, element OID).
*
* For backwards compatibility we allow OPAQUE in place of the actual
* type name; if we see this, we issue a NOTICE and fix up the
* pg_proc entry.
* type name; if we see this, we issue a NOTICE and fix up the pg_proc
* entry.
*/
MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid));

@ -940,12 +945,13 @@ findTypeOutputFunction(List *procname, Oid typeOid)
{
/* Found, but must complain and fix the pg_proc entry */
ereport(NOTICE,
(errmsg("changing argument type of function %s from OPAQUE to %s",
NameListToString(procname), format_type_be(typeOid))));
(errmsg("changing argument type of function %s from OPAQUE to %s",
NameListToString(procname), format_type_be(typeOid))));
SetFunctionArgType(procOid, 0, typeOid);

/*
* Need CommandCounterIncrement since DefineType will likely
* try to alter the pg_proc tuple again.
* Need CommandCounterIncrement since DefineType will likely try
* to alter the pg_proc tuple again.
*/
CommandCounterIncrement();

@ -1050,7 +1056,7 @@ DefineCompositeType(const RangeVar *typevar, List *coldeflist)
if (coldeflist == NIL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("composite type must have at least one attribute")));
errmsg("composite type must have at least one attribute")));

/*
* now create the parameters for keys/inheritance etc. All of them are
@ -1072,7 +1078,7 @@ DefineCompositeType(const RangeVar *typevar, List *coldeflist)
/*
* AlterDomainDefault
*
* Routine implementing ALTER DOMAIN SET/DROP DEFAULT statements.
* Routine implementing ALTER DOMAIN SET/DROP DEFAULT statements.
*/
void
AlterDomainDefault(List *names, Node *defaultRaw)
@ -1083,12 +1089,12 @@ AlterDomainDefault(List *names, Node *defaultRaw)
ParseState *pstate;
Relation rel;
char *defaultValue;
Node *defaultExpr = NULL; /* NULL if no default specified */
Node *defaultExpr = NULL; /* NULL if no default specified */
Datum new_record[Natts_pg_type];
char new_record_nulls[Natts_pg_type];
char new_record_repl[Natts_pg_type];
HeapTuple newtuple;
Form_pg_type typTup;
Form_pg_type typTup;

/* Make a TypeName so we can use standard type lookup machinery */
typename = makeNode(TypeName);
@ -1113,7 +1119,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
if (!HeapTupleIsValid(tup))
elog(ERROR, "cache lookup failed for type %u", domainoid);

/* Doesn't return if user isn't allowed to alter the domain */
/* Doesn't return if user isn't allowed to alter the domain */
domainOwnerCheck(tup, typename);

/* Setup new tuple */
@ -1129,9 +1135,10 @@ AlterDomainDefault(List *names, Node *defaultRaw)
{
/* Create a dummy ParseState for transformExpr */
pstate = make_parsestate(NULL);

/*
* Cook the colDef->raw_expr into an expression. Note:
* Name is strictly for error message
* Cook the colDef->raw_expr into an expression. Note: Name is
* strictly for error message
*/
defaultExpr = cookDefault(pstate, defaultRaw,
typTup->typbasetype,
@ -1139,27 +1146,29 @@ AlterDomainDefault(List *names, Node *defaultRaw)
NameStr(typTup->typname));

/*
* Expression must be stored as a nodeToString result, but
* we also require a valid textual representation (mainly
* to make life easier for pg_dump).
* Expression must be stored as a nodeToString result, but we also
* require a valid textual representation (mainly to make life
* easier for pg_dump).
*/
defaultValue = deparse_expression(defaultExpr,
deparse_context_for(NameStr(typTup->typname),
InvalidOid),
deparse_context_for(NameStr(typTup->typname),
InvalidOid),
false, false);

/*
* Form an updated tuple with the new default and write it back.
*/
new_record[Anum_pg_type_typdefaultbin - 1] = DirectFunctionCall1(textin,
CStringGetDatum(
nodeToString(defaultExpr)));
CStringGetDatum(
nodeToString(defaultExpr)));

new_record_repl[Anum_pg_type_typdefaultbin - 1] = 'r';
new_record[Anum_pg_type_typdefault - 1] = DirectFunctionCall1(textin,
CStringGetDatum(defaultValue));
CStringGetDatum(defaultValue));
new_record_repl[Anum_pg_type_typdefault - 1] = 'r';
}
else /* Default is NULL, drop it */
else
/* Default is NULL, drop it */
{
new_record_nulls[Anum_pg_type_typdefaultbin - 1] = 'n';
new_record_repl[Anum_pg_type_typdefaultbin - 1] = 'r';
@ -1168,7 +1177,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
}

newtuple = heap_modifytuple(tup, rel,
new_record, new_record_nulls, new_record_repl);
new_record, new_record_nulls, new_record_repl);

simple_heap_update(rel, &tup->t_self, newtuple);

@ -1178,7 +1187,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
GenerateTypeDependencies(typTup->typnamespace,
domainoid,
typTup->typrelid,
0, /* relation kind is n/a */
0, /* relation kind is n/a */
typTup->typinput,
typTup->typoutput,
typTup->typreceive,
@ -1186,7 +1195,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
typTup->typelem,
typTup->typbasetype,
defaultExpr,
true); /* Rebuild is true */
true); /* Rebuild is true */

/* Clean up */
heap_close(rel, NoLock);
@ -1196,7 +1205,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
/*
* AlterDomainNotNull
*
* Routine implementing ALTER DOMAIN SET/DROP NOT NULL statements.
* Routine implementing ALTER DOMAIN SET/DROP NOT NULL statements.
*/
void
AlterDomainNotNull(List *names, bool notNull)
@ -1205,7 +1214,7 @@ AlterDomainNotNull(List *names, bool notNull)
Oid domainoid;
Relation typrel;
HeapTuple tup;
Form_pg_type typTup;
Form_pg_type typTup;

/* Make a TypeName so we can use standard type lookup machinery */
typename = makeNode(TypeName);
@ -1231,7 +1240,7 @@ AlterDomainNotNull(List *names, bool notNull)
elog(ERROR, "cache lookup failed for type %u", domainoid);
typTup = (Form_pg_type) GETSTRUCT(tup);

/* Doesn't return if user isn't allowed to alter the domain */
/* Doesn't return if user isn't allowed to alter the domain */
domainOwnerCheck(tup, typename);

/* Is the domain already set to the desired constraint? */
@ -1248,15 +1257,15 @@ AlterDomainNotNull(List *names, bool notNull)
/* Adding a NOT NULL constraint requires checking existing columns */
if (notNull)
{
List *rels;
List *rt;
List *rels;
List *rt;

/* Fetch relation list with attributes based on this domain */
/* ShareLock is sufficient to prevent concurrent data changes */

rels = get_rels_with_domain(domainoid, ShareLock);

foreach (rt, rels)
foreach(rt, rels)
{
RelToCheck *rtc = (RelToCheck *) lfirst(rt);
Relation testrel = rtc->rel;
@ -1268,14 +1277,14 @@ AlterDomainNotNull(List *names, bool notNull)
scan = heap_beginscan(testrel, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
int i;
int i;

/* Test attributes that are of the domain */
for (i = 0; i < rtc->natts; i++)
{
int attnum = rtc->atts[i];
Datum d;
bool isNull;
int attnum = rtc->atts[i];
Datum d;
bool isNull;

d = heap_getattr(tuple, attnum, tupdesc, &isNull);

@ -1284,7 +1293,7 @@ AlterDomainNotNull(List *names, bool notNull)
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("relation \"%s\" attribute \"%s\" contains NULL values",
RelationGetRelationName(testrel),
NameStr(tupdesc->attrs[attnum - 1]->attname))));
NameStr(tupdesc->attrs[attnum - 1]->attname))));
}
}
heap_endscan(scan);
@ -1295,7 +1304,7 @@ AlterDomainNotNull(List *names, bool notNull)
}

/*
* Okay to update pg_type row. We can scribble on typTup because it's
* Okay to update pg_type row. We can scribble on typTup because it's
* a copy.
*/
typTup->typnotnull = notNull;
@ -1321,7 +1330,7 @@ AlterDomainDropConstraint(List *names, const char *constrName, DropBehavior beha
Oid domainoid;
HeapTuple tup;
Relation rel;
Form_pg_type typTup;
Form_pg_type typTup;
Relation conrel;
SysScanDesc conscan;
ScanKeyData key[1];
@ -1350,7 +1359,7 @@ AlterDomainDropConstraint(List *names, const char *constrName, DropBehavior beha
if (!HeapTupleIsValid(tup))
elog(ERROR, "cache lookup failed for type %u", domainoid);

/* Doesn't return if user isn't allowed to alter the domain */
/* Doesn't return if user isn't allowed to alter the domain */
domainOwnerCheck(tup, typename);

/* Grab an appropriate lock on the pg_constraint relation */
@ -1403,15 +1412,15 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
Oid domainoid;
Relation typrel;
HeapTuple tup;
Form_pg_type typTup;
List *rels;
List *rt;
EState *estate;
Form_pg_type typTup;
List *rels;
List *rt;
EState *estate;
ExprContext *econtext;
char *ccbin;
Expr *expr;
ExprState *exprstate;
int counter = 0;
char *ccbin;
Expr *expr;
ExprState *exprstate;
int counter = 0;
Constraint *constr;

/* Make a TypeName so we can use standard type lookup machinery */
@ -1438,14 +1447,14 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
elog(ERROR, "cache lookup failed for type %u", domainoid);
typTup = (Form_pg_type) GETSTRUCT(tup);

/* Doesn't return if user isn't allowed to alter the domain */
/* Doesn't return if user isn't allowed to alter the domain */
domainOwnerCheck(tup, typename);

/* Check for unsupported constraint types */
if (IsA(newConstraint, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("FOREIGN KEY constraints not supported for domains")));
errmsg("FOREIGN KEY constraints not supported for domains")));

/* otherwise it should be a plain Constraint */
if (!IsA(newConstraint, Constraint))
@ -1469,20 +1478,20 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
errmsg("use ALTER DOMAIN .. [ SET | DROP ] NOT NULL instead")));
break;

case CONSTR_CHECK:
case CONSTR_CHECK:
/* processed below */
break;
break;

case CONSTR_UNIQUE:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("UNIQUE constraints not supported for domains")));
errmsg("UNIQUE constraints not supported for domains")));
break;

case CONSTR_PRIMARY:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("PRIMARY KEY constraints not supported for domains")));
errmsg("PRIMARY KEY constraints not supported for domains")));
break;

case CONSTR_ATTR_DEFERRABLE:
@ -1501,18 +1510,18 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
}

/*
* Since all other constraint types throw errors, this must be
* a check constraint. First, process the constraint expression
* and add an entry to pg_constraint.
* Since all other constraint types throw errors, this must be a check
* constraint. First, process the constraint expression and add an
* entry to pg_constraint.
*/

ccbin = domainAddConstraint(HeapTupleGetOid(tup), typTup->typnamespace,
typTup->typbasetype, typTup->typtypmod,
constr, &counter, NameStr(typTup->typname));
constr, &counter, NameStr(typTup->typname));

/*
* Test all values stored in the attributes based on the domain
* the constraint is being added to.
* Test all values stored in the attributes based on the domain the
* constraint is being added to.
*/
expr = (Expr *) stringToNode(ccbin);

@ -1528,7 +1537,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)

rels = get_rels_with_domain(domainoid, ShareLock);

foreach (rt, rels)
foreach(rt, rels)
{
RelToCheck *rtc = (RelToCheck *) lfirst(rt);
Relation testrel = rtc->rel;
@ -1540,15 +1549,15 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
scan = heap_beginscan(testrel, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
int i;
int i;

/* Test attributes that are of the domain */
for (i = 0; i < rtc->natts; i++)
{
int attnum = rtc->atts[i];
Datum d;
bool isNull;
Datum conResult;
int attnum = rtc->atts[i];
Datum d;
bool isNull;
Datum conResult;

d = heap_getattr(tuple, attnum, tupdesc, &isNull);

@ -1564,7 +1573,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
(errcode(ERRCODE_CHECK_VIOLATION),
errmsg("relation \"%s\" attribute \"%s\" contains values that violate the new constraint",
RelationGetRelationName(testrel),
NameStr(tupdesc->attrs[attnum - 1]->attname))));
NameStr(tupdesc->attrs[attnum - 1]->attname))));
}

ResetExprContext(econtext);
@ -1610,7 +1619,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
static List *
get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
{
List *result = NIL;
List *result = NIL;
Relation depRel;
ScanKeyData key[2];
SysScanDesc depScan;
@ -1634,10 +1643,10 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)

while (HeapTupleIsValid(depTup = systable_getnext(depScan)))
{
Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup);
Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup);
RelToCheck *rtc = NULL;
List *rellist;
Form_pg_attribute pg_att;
Form_pg_attribute pg_att;
int ptr;

/* Ignore dependees that aren't user columns of tables */
@ -1675,10 +1684,10 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
}

/*
* Confirm column has not been dropped, and is of the expected type.
* This defends against an ALTER DROP COLUMN occuring just before
* we acquired lock ... but if the whole table were dropped, we'd
* still have a problem.
* Confirm column has not been dropped, and is of the expected
* type. This defends against an ALTER DROP COLUMN occuring just
* before we acquired lock ... but if the whole table were
* dropped, we'd still have a problem.
*/
if (pg_depend->objsubid > RelationGetNumberOfAttributes(rtc->rel))
continue;
@ -1687,16 +1696,16 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
continue;

/*
* Okay, add column to result. We store the columns in column-number
* order; this is just a hack to improve predictability of regression
* test output ...
* Okay, add column to result. We store the columns in
* column-number order; this is just a hack to improve
* predictability of regression test output ...
*/
Assert(rtc->natts < RelationGetNumberOfAttributes(rtc->rel));

ptr = rtc->natts++;
while (ptr > 0 && rtc->atts[ptr-1] > pg_depend->objsubid)
while (ptr > 0 && rtc->atts[ptr - 1] > pg_depend->objsubid)
{
rtc->atts[ptr] = rtc->atts[ptr-1];
rtc->atts[ptr] = rtc->atts[ptr - 1];
ptr--;
}
rtc->atts[ptr] = pg_depend->objsubid;
@ -1719,7 +1728,7 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
static void
domainOwnerCheck(HeapTuple tup, TypeName *typename)
{
Form_pg_type typTup = (Form_pg_type) GETSTRUCT(tup);
Form_pg_type typTup = (Form_pg_type) GETSTRUCT(tup);

/* Check that this is actually a domain */
if (typTup->typtype != 'd')
@ -1746,7 +1755,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
char *ccsrc;
char *ccbin;
ParseState *pstate;
CoerceToDomainValue *domVal;
CoerceToDomainValue *domVal;

/*
* Assign or validate constraint name
@ -1759,8 +1768,8 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
constr->name))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("constraint \"%s\" for domain \"%s\" already exists",
constr->name, domainName)));
errmsg("constraint \"%s\" for domain \"%s\" already exists",
constr->name, domainName)));
}
else
constr->name = GenerateConstraintName(CONSTRAINT_DOMAIN,
@ -1775,10 +1784,10 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,

/*
* Set up a CoerceToDomainValue to represent the occurrence of VALUE
* in the expression. Note that it will appear to have the type of the
* base type, not the domain. This seems correct since within the
* check expression, we should not assume the input value can be considered
* a member of the domain.
* in the expression. Note that it will appear to have the type of
* the base type, not the domain. This seems correct since within the
* check expression, we should not assume the input value can be
* considered a member of the domain.
*/
domVal = makeNode(CoerceToDomainValue);
domVal->typeId = baseTypeOid;
@ -1841,13 +1850,13 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
/*
* Store the constraint in pg_constraint
*/
CreateConstraintEntry(constr->name, /* Constraint Name */
domainNamespace, /* namespace */
CreateConstraintEntry(constr->name, /* Constraint Name */
domainNamespace, /* namespace */
CONSTRAINT_CHECK, /* Constraint Type */
false, /* Is Deferrable */
false, /* Is Deferred */
InvalidOid, /* not a relation constraint */
NULL,
InvalidOid, /* not a relation constraint */
NULL,
0,
domainOid, /* domain constraint */
InvalidOid, /* Foreign key fields */
@ -1857,13 +1866,13 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
' ',
' ',
InvalidOid,
expr, /* Tree form check constraint */
expr, /* Tree form check constraint */
ccbin, /* Binary form check constraint */
ccsrc); /* Source form check constraint */
ccsrc); /* Source form check constraint */

/*
* Return the compiled constraint expression so the calling routine can
* perform any additional required tests.
* Return the compiled constraint expression so the calling routine
* can perform any additional required tests.
*/
return ccbin;
}
@ -1893,7 +1902,7 @@ GetDomainConstraints(Oid typeOid)
Form_pg_type typTup;
ScanKeyData key[1];
SysScanDesc scan;

tup = SearchSysCache(TYPEOID,
ObjectIdGetDatum(typeOid),
0, 0, 0);
@ -1915,17 +1924,20 @@ GetDomainConstraints(Oid typeOid)

while (HeapTupleIsValid(conTup = systable_getnext(scan)))
{
Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
Datum val;
bool isNull;
Expr *check_expr;
Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
Datum val;
bool isNull;
Expr *check_expr;
DomainConstraintState *r;

/* Ignore non-CHECK constraints (presently, shouldn't be any) */
if (c->contype != CONSTRAINT_CHECK)
continue;

/* Not expecting conbin to be NULL, but we'll test for it anyway */
/*
* Not expecting conbin to be NULL, but we'll test for it
* anyway
*/
val = fastgetattr(conTup, Anum_pg_constraint_conbin,
conRel->rd_att, &isNull);
if (isNull)
@ -1945,8 +1957,8 @@ GetDomainConstraints(Oid typeOid)
r->check_expr = ExecInitExpr(check_expr, NULL);

/*
* use lcons() here because constraints of lower domains should
* be applied earlier.
* use lcons() here because constraints of lower domains
* should be applied earlier.
*/
result = lcons(r, result);
}
@ -2003,7 +2015,7 @@ AlterTypeOwner(List *names, AclId newOwnerSysId)
Oid typeOid;
Relation rel;
HeapTuple tup;
Form_pg_type typTup;
Form_pg_type typTup;

/* Make a TypeName so we can use standard type lookup machinery */
typename = makeNode(TypeName);

@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.122 2003/08/01 00:15:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.123 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -146,12 +146,12 @@ write_group_file(Relation grel)
if (fp == NULL)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not write temp file \"%s\": %m", tempname)));
errmsg("could not write temp file \"%s\": %m", tempname)));

/*
* Read pg_group and write the file. Note we use SnapshotSelf to ensure
* we see all effects of current transaction. (Perhaps could do a
* CommandCounterIncrement beforehand, instead?)
* Read pg_group and write the file. Note we use SnapshotSelf to
* ensure we see all effects of current transaction. (Perhaps could
* do a CommandCounterIncrement beforehand, instead?)
*/
scan = heap_beginscan(grel, SnapshotSelf, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
@ -212,7 +212,7 @@ write_group_file(Relation grel)
if (usename[j] != '\0')
{
ereport(LOG,
(errmsg("invalid user name \"%s\"", usename)));
(errmsg("invalid user name \"%s\"", usename)));
continue;
}

@ -245,7 +245,7 @@ write_group_file(Relation grel)
if (ferror(fp))
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not write temp file \"%s\": %m", tempname)));
errmsg("could not write temp file \"%s\": %m", tempname)));
FreeFile(fp);

/*
@ -294,12 +294,12 @@ write_user_file(Relation urel)
if (fp == NULL)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not write temp file \"%s\": %m", tempname)));
errmsg("could not write temp file \"%s\": %m", tempname)));

/*
* Read pg_shadow and write the file. Note we use SnapshotSelf to ensure
* we see all effects of current transaction. (Perhaps could do a
* CommandCounterIncrement beforehand, instead?)
* Read pg_shadow and write the file. Note we use SnapshotSelf to
* ensure we see all effects of current transaction. (Perhaps could
* do a CommandCounterIncrement beforehand, instead?)
*/
scan = heap_beginscan(urel, SnapshotSelf, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
@ -376,7 +376,7 @@ write_user_file(Relation urel)
if (ferror(fp))
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not write temp file \"%s\": %m", tempname)));
errmsg("could not write temp file \"%s\": %m", tempname)));
FreeFile(fp);

/*
@ -430,10 +430,10 @@ AtEOXact_UpdatePasswordFile(bool isCommit)
Relation urel = NULL;
Relation grel = NULL;

if (! (user_file_update_needed || group_file_update_needed))
if (!(user_file_update_needed || group_file_update_needed))
return;

if (! isCommit)
if (!isCommit)
{
user_file_update_needed = false;
group_file_update_needed = false;
@ -441,12 +441,12 @@ AtEOXact_UpdatePasswordFile(bool isCommit)
}

/*
* We use ExclusiveLock to ensure that only one backend writes the flat
* file(s) at a time. That's sufficient because it's okay to allow plain
* reads of the tables in parallel. There is some chance of a deadlock
* here (if we were triggered by a user update of pg_shadow or pg_group,
* which likely won't have gotten a strong enough lock), so get the locks
* we need before writing anything.
* We use ExclusiveLock to ensure that only one backend writes the
* flat file(s) at a time. That's sufficient because it's okay to
* allow plain reads of the tables in parallel. There is some chance
* of a deadlock here (if we were triggered by a user update of
* pg_shadow or pg_group, which likely won't have gotten a strong
* enough lock), so get the locks we need before writing anything.
*/
if (user_file_update_needed)
urel = heap_openr(ShadowRelationName, ExclusiveLock);
@ -1088,7 +1088,7 @@ DropUser(DropUserStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
errmsg("user \"%s\" cannot be dropped", user),
errdetail("The user owns database \"%s\".", dbname)));
errdetail("The user owns database \"%s\".", dbname)));
}

heap_endscan(scan);
@ -1172,10 +1172,10 @@ RenameUser(const char *oldname, const char *newname)
errmsg("user \"%s\" does not exist", oldname)));

/*
* XXX Client applications probably store the session user
* somewhere, so renaming it could cause confusion. On the other
* hand, there may not be an actual problem besides a little
* confusion, so think about this and decide.
* XXX Client applications probably store the session user somewhere,
* so renaming it could cause confusion. On the other hand, there may
* not be an actual problem besides a little confusion, so think about
* this and decide.
*/
if (((Form_pg_shadow) GETSTRUCT(tup))->usesysid == GetSessionUserId())
ereport(ERROR,
@ -1221,14 +1221,14 @@ CheckPgUserAclNotNull(void)
htup = SearchSysCache(RELOID,
ObjectIdGetDatum(RelOid_pg_shadow),
0, 0, 0);
if (!HeapTupleIsValid(htup)) /* should not happen, we hope */
if (!HeapTupleIsValid(htup)) /* should not happen, we hope */
elog(ERROR, "cache lookup failed for relation %u", RelOid_pg_shadow);

if (heap_attisnull(htup, Anum_pg_class_relacl))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("before using passwords you must revoke permissions on %s",
ShadowRelationName),
errmsg("before using passwords you must revoke permissions on %s",
ShadowRelationName),
errdetail("This restriction is to prevent unprivileged users from reading the passwords."),
errhint("Try 'REVOKE ALL ON \"%s\" FROM PUBLIC'.",
ShadowRelationName)));

|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.257 2003/07/20 21:56:34 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.258 2003/08/04 00:43:17 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -287,24 +287,25 @@ vacuum(VacuumStmt *vacstmt)
|
||||
|
||||
if (vacstmt->vacuum)
|
||||
{
|
||||
if (! vacuum_rel(relid, vacstmt, RELKIND_RELATION))
|
||||
all_rels = false; /* forget about updating dbstats */
|
||||
if (!vacuum_rel(relid, vacstmt, RELKIND_RELATION))
|
||||
all_rels = false; /* forget about updating dbstats */
|
||||
}
|
||||
if (vacstmt->analyze)
|
||||
{
|
||||
MemoryContext old_context = NULL;
|
||||
|
||||
/*
|
||||
* If we vacuumed, use new transaction for analyze.
|
||||
* Otherwise, we can use the outer transaction, but we still
|
||||
* need to call analyze_rel in a memory context that will be
|
||||
* cleaned up on return (else we leak memory while processing
|
||||
* multiple tables).
|
||||
* If we vacuumed, use new transaction for analyze. Otherwise,
|
||||
* we can use the outer transaction, but we still need to call
|
||||
* analyze_rel in a memory context that will be cleaned up on
|
||||
* return (else we leak memory while processing multiple
|
||||
* tables).
|
||||
*/
|
||||
if (vacstmt->vacuum)
|
||||
{
|
||||
StartTransactionCommand();
|
||||
SetQuerySnapshot(); /* might be needed for functions in indexes */
|
||||
SetQuerySnapshot(); /* might be needed for functions
|
||||
* in indexes */
|
||||
}
|
||||
else
|
||||
old_context = MemoryContextSwitchTo(anl_context);
|
||||
@ -734,7 +735,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
|
||||
|
||||
/* Begin a transaction for vacuuming this relation */
|
||||
StartTransactionCommand();
|
||||
SetQuerySnapshot(); /* might be needed for functions in indexes */
|
||||
SetQuerySnapshot(); /* might be needed for functions in
|
||||
* indexes */
|
||||
|
||||
/*
|
||||
* Check for user-requested abort. Note we want this to be inside a
|
||||
@ -812,7 +814,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
|
||||
{
|
||||
relation_close(onerel, lmode);
|
||||
CommitTransactionCommand();
|
||||
return true; /* assume no long-lived data in temp tables */
|
||||
return true; /* assume no long-lived data in temp
|
||||
* tables */
|
||||
}
|
||||
|
||||
/*
|
||||
@ -860,7 +863,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
|
||||
*/
|
||||
if (toast_relid != InvalidOid)
|
||||
{
|
||||
if (! vacuum_rel(toast_relid, vacstmt, RELKIND_TOASTVALUE))
|
||||
if (!vacuum_rel(toast_relid, vacstmt, RELKIND_TOASTVALUE))
|
||||
result = false; /* failed to vacuum the TOAST table? */
|
||||
}
|
||||
|
||||
@ -1087,8 +1090,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
|
||||
if (PageIsNew(page))
|
||||
{
|
||||
ereport(WARNING,
|
||||
(errmsg("relation \"%s\" page %u is uninitialized --- fixing",
|
||||
relname, blkno)));
|
||||
(errmsg("relation \"%s\" page %u is uninitialized --- fixing",
|
||||
relname, blkno)));
|
||||
PageInit(page, BufferGetPageSize(buf), 0);
|
||||
vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
|
||||
free_space += vacpage->free;
|
||||
@ -1314,7 +1317,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
|
||||
|
||||
/*
|
||||
* Include the page in empty_end_pages if it will be empty after
|
||||
* vacuuming; this is to keep us from using it as a move destination.
|
||||
* vacuuming; this is to keep us from using it as a move
|
||||
* destination.
|
||||
*/
|
||||
if (notup)
|
||||
{
|
||||
@ -1382,9 +1386,9 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
|
||||
RelationGetRelationName(onerel),
|
||||
tups_vacuumed, num_tuples, nblocks),
|
||||
errdetail("%.0f dead tuples cannot be removed yet.\n"
|
||||
"Nonremovable tuples range from %lu to %lu bytes long.\n"
|
||||
"Nonremovable tuples range from %lu to %lu bytes long.\n"
|
||||
"There were %.0f unused item pointers.\n"
|
||||
"Total free space (including removable tuples) is %.0f bytes.\n"
|
||||
"Total free space (including removable tuples) is %.0f bytes.\n"
|
||||
"%u pages are or will become empty, including %u at the end of the table.\n"
|
||||
"%u pages containing %.0f free bytes are potential move destinations.\n"
|
||||
"%s",
|
||||
@ -2380,8 +2384,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
|
||||
/*
|
||||
* It'd be cleaner to make this report at the bottom of this routine,
|
||||
* but then the rusage would double-count the second pass of index
|
||||
* vacuuming. So do it here and ignore the relatively small amount
|
||||
* of processing that occurs below.
|
||||
* vacuuming. So do it here and ignore the relatively small amount of
|
||||
* processing that occurs below.
|
||||
*/
|
||||
ereport(elevel,
|
||||
(errmsg("\"%s\": moved %u tuples, truncated %u to %u pages",
|
||||
@ -2735,7 +2739,7 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
|
||||
stats->num_index_tuples,
|
||||
stats->num_pages),
|
||||
errdetail("%.0f index tuples were removed.\n"
|
||||
"%u index pages have been deleted, %u are currently reusable.\n"
|
||||
"%u index pages have been deleted, %u are currently reusable.\n"
|
||||
"%s",
|
||||
stats->tuples_removed,
|
||||
stats->pages_deleted, stats->pages_free,
|
||||
@ -2752,7 +2756,7 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
|
||||
ereport(WARNING,
|
||||
(errmsg("index \"%s\" contains %.0f tuples, but table contains %.0f tuples",
|
||||
RelationGetRelationName(indrel),
|
||||
stats->num_index_tuples, num_tuples + keep_tuples),
|
||||
stats->num_index_tuples, num_tuples + keep_tuples),
|
||||
errhint("Rebuild the index with REINDEX.")));
|
||||
}
|
||||
|
||||
@ -2837,13 +2841,14 @@ vac_update_fsm(Relation onerel, VacPageList fraged_pages,
|
||||
|
||||
/*
|
||||
* We only report pages with free space at least equal to the average
|
||||
* request size --- this avoids cluttering FSM with uselessly-small bits
|
||||
* of space. Although FSM would discard pages with little free space
|
||||
* anyway, it's important to do this prefiltering because (a) it reduces
|
||||
* the time spent holding the FSM lock in RecordRelationFreeSpace, and
|
||||
* (b) FSM uses the number of pages reported as a statistic for guiding
|
||||
* space management. If we didn't threshold our reports the same way
|
||||
* vacuumlazy.c does, we'd be skewing that statistic.
|
||||
* request size --- this avoids cluttering FSM with uselessly-small
|
||||
* bits of space. Although FSM would discard pages with little free
|
||||
* space anyway, it's important to do this prefiltering because (a) it
|
||||
* reduces the time spent holding the FSM lock in
|
||||
* RecordRelationFreeSpace, and (b) FSM uses the number of pages
|
||||
* reported as a statistic for guiding space management. If we didn't
|
||||
* threshold our reports the same way vacuumlazy.c does, we'd be
|
||||
* skewing that statistic.
|
||||
*/
|
||||
threshold = GetAvgFSMRequestSize(&onerel->rd_node);
|
||||
|
||||
|
@ -31,7 +31,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.29 2003/07/20 21:56:34 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.30 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -79,7 +79,7 @@ typedef struct LVRelStats
bool fs_is_heap; /* are we using heap organization? */
int num_free_pages; /* current # of entries */
int max_free_pages; /* # slots allocated in array */
PageFreeSpaceInfo *free_pages; /* array or heap of blkno/avail */
PageFreeSpaceInfo *free_pages; /* array or heap of blkno/avail */
} LVRelStats;

@ -162,7 +162,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
*/
possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
if (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION)
possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION)
lazy_truncate_heap(onerel, vacrelstats);

/* Update shared free space map with final free space info */
@ -659,7 +659,7 @@ lazy_vacuum_index(Relation indrel, LVRelStats *vacrelstats)
stats->num_index_tuples,
stats->num_pages),
errdetail("%.0f index tuples were removed.\n"
"%u index pages have been deleted, %u are currently reusable.\n"
"%u index pages have been deleted, %u are currently reusable.\n"
"%s",
stats->tuples_removed,
stats->pages_deleted, stats->pages_free,
@ -966,16 +966,18 @@ lazy_record_free_space(LVRelStats *vacrelstats,
/*
* A page with less than stats->threshold free space will be forgotten
* immediately, and never passed to the free space map. Removing the
* uselessly small entries early saves cycles, and in particular reduces
* the amount of time we spend holding the FSM lock when we finally call
* RecordRelationFreeSpace. Since the FSM will probably drop pages with
* little free space anyway, there's no point in making this really small.
* uselessly small entries early saves cycles, and in particular
* reduces the amount of time we spend holding the FSM lock when we
* finally call RecordRelationFreeSpace. Since the FSM will probably
* drop pages with little free space anyway, there's no point in
* making this really small.
*
* XXX Is it worth trying to measure average tuple size, and using that to
* adjust the threshold? Would be worthwhile if FSM has no stats yet
* for this relation. But changing the threshold as we scan the rel
* might lead to bizarre behavior, too. Also, it's probably better if
* vacuum.c has the same thresholding behavior as we do here.
* XXX Is it worth trying to measure average tuple size, and using that
* to adjust the threshold? Would be worthwhile if FSM has no stats
* yet for this relation. But changing the threshold as we scan the
* rel might lead to bizarre behavior, too. Also, it's probably
* better if vacuum.c has the same thresholding behavior as we do
* here.
*/
if (avail < vacrelstats->threshold)
return;
@ -996,7 +998,7 @@ lazy_record_free_space(LVRelStats *vacrelstats,
/*----------
* The rest of this routine works with "heap" organization of the
* free space arrays, wherein we maintain the heap property
* avail[(j-1) div 2] <= avail[j] for 0 < j < n.
* avail[(j-1) div 2] <= avail[j] for 0 < j < n.
* In particular, the zero'th element always has the smallest available
* space and can be discarded to make room for a new page with more space.
* See Knuth's discussion of heap-based priority queues, sec 5.2.3;

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.85 2003/07/29 00:03:18 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.86 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -34,7 +34,7 @@
* to duplicate the test in AC_STRUCT_TIMEZONE.
*/
#ifdef HAVE_TZNAME
#ifndef tzname /* For SGI. */
#ifndef tzname /* For SGI. */
extern char *tzname[];
#endif
#endif
@ -273,12 +273,11 @@ static void
clear_tz(void)
{
/*
* unsetenv() works fine, but is BSD, not POSIX, and is not
* available under Solaris, among others. Apparently putenv()
* called as below clears the process-specific environment
* variables. Other reasonable arguments to putenv() (e.g.
* "TZ=", "TZ", "") result in a core dump (under Linux
* anyway). - thomas 1998-01-26
* unsetenv() works fine, but is BSD, not POSIX, and is not available
* under Solaris, among others. Apparently putenv() called as below
* clears the process-specific environment variables. Other
* reasonable arguments to putenv() (e.g. "TZ=", "TZ", "") result in a
* core dump (under Linux anyway). - thomas 1998-01-26
*/
if (tzbuf[0] == 'T')
{
@ -298,14 +297,14 @@ clear_tz(void)
*
* If tzname[1] is a nonempty string, *or* the global timezone variable is
* not zero, then tzset must have recognized the TZ value as something
* different from UTC. Return true.
* different from UTC. Return true.
*
* Otherwise, check to see if the TZ name is a known spelling of "UTC"
* (ie, appears in our internal tables as a timezone equivalent to UTC).
* If so, accept it.
*
* This will reject nonstandard spellings of UTC unless tzset() chose to
* set tzname[1] as well as tzname[0]. The glibc version of tzset() will
* set tzname[1] as well as tzname[0]. The glibc version of tzset() will
* do so, but on other systems we may be tightening the spec a little.
*
* Another problem is that on some platforms (eg HPUX), if tzset thinks the
@ -337,8 +336,8 @@ tzset_succeeded(const char *tz)
return true;

/*
* Check for known spellings of "UTC". Note we must downcase the input
* before passing it to DecodePosixTimezone().
* Check for known spellings of "UTC". Note we must downcase the
* input before passing it to DecodePosixTimezone().
*/
StrNCpy(tztmp, tz, sizeof(tztmp));
for (cp = tztmp; *cp; cp++)
@ -368,7 +367,7 @@ tz_acceptable(void)

/*
* To detect leap-second timekeeping, compute the time_t value for
* local midnight, 2000-01-01. Insist that this be a multiple of 60;
* local midnight, 2000-01-01. Insist that this be a multiple of 60;
* any partial-minute offset has to be due to leap seconds.
*/
MemSet(&tt, 0, sizeof(tt));
@ -399,7 +398,7 @@ assign_timezone(const char *value, bool doit, bool interactive)
*/
if (!have_saved_tz)
{
char *orig_tz = getenv("TZ");
char *orig_tz = getenv("TZ");

if (orig_tz)
StrNCpy(orig_tzbuf, orig_tz, sizeof(orig_tzbuf));
@ -434,9 +433,9 @@ assign_timezone(const char *value, bool doit, bool interactive)

/*
* Try to parse it. XXX an invalid interval format will result in
* ereport, which is not desirable for GUC. We did what we could to
* guard against this in flatten_set_variable_args, but a string
* coming in from postgresql.conf might contain anything.
* ereport, which is not desirable for GUC. We did what we could
* to guard against this in flatten_set_variable_args, but a
* string coming in from postgresql.conf might contain anything.
*/
interval = DatumGetIntervalP(DirectFunctionCall3(interval_in,
CStringGetDatum(val),
@ -455,7 +454,7 @@ assign_timezone(const char *value, bool doit, bool interactive)
if (doit)
{
/* Here we change from SQL to Unix sign convention */
CTimeZone = - interval->time;
CTimeZone = -interval->time;
HasCTZSet = true;
}
pfree(interval);
@ -471,22 +470,22 @@ assign_timezone(const char *value, bool doit, bool interactive)
if (doit)
{
/* Here we change from SQL to Unix sign convention */
CTimeZone = - hours * 3600;
CTimeZone = -hours * 3600;
HasCTZSet = true;
}
}
else if (strcasecmp(value, "UNKNOWN") == 0)
{
/*
* UNKNOWN is the value shown as the "default" for TimeZone
* in guc.c. We interpret it as meaning the original TZ
* inherited from the environment. Note that if there is an
* original TZ setting, we will return that rather than UNKNOWN
* as the canonical spelling.
* UNKNOWN is the value shown as the "default" for TimeZone in
* guc.c. We interpret it as meaning the original TZ
* inherited from the environment. Note that if there is an
* original TZ setting, we will return that rather than
* UNKNOWN as the canonical spelling.
*/
if (doit)
{
bool ok;
bool ok;

/* Revert to original setting of TZ, whatever it was */
if (orig_tzbuf[0])
@ -516,14 +515,14 @@ assign_timezone(const char *value, bool doit, bool interactive)
* Otherwise assume it is a timezone name.
*
* We have to actually apply the change before we can have any
* hope of checking it. So, save the old value in case we have
* to back out. Note that it's possible the old setting is in
* tzbuf, so we'd better copy it.
* hope of checking it. So, save the old value in case we
* have to back out. Note that it's possible the old setting
* is in tzbuf, so we'd better copy it.
*/
char save_tzbuf[TZBUF_LEN];
char *save_tz;
bool known,
acceptable;
char save_tzbuf[TZBUF_LEN];
char *save_tz;
bool known,
acceptable;

save_tz = getenv("TZ");
if (save_tz)
@ -563,8 +562,8 @@ assign_timezone(const char *value, bool doit, bool interactive)
{
ereport(interactive ? ERROR : LOG,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("timezone \"%s\" appears to use leap seconds",
value),
errmsg("timezone \"%s\" appears to use leap seconds",
value),
errdetail("PostgreSQL does not support leap seconds")));
return NULL;
}
@ -609,7 +608,7 @@ show_timezone(void)
Interval interval;

interval.month = 0;
interval.time = - CTimeZone;
interval.time = -CTimeZone;

tzn = DatumGetCString(DirectFunctionCall1(interval_out,
IntervalPGetDatum(&interval)));
@ -703,16 +702,16 @@ assign_client_encoding(const char *value, bool doit, bool interactive)
/*
* Note: if we are in startup phase then SetClientEncoding may not be
* able to really set the encoding. In this case we will assume that
* the encoding is okay, and InitializeClientEncoding() will fix things
* once initialization is complete.
* the encoding is okay, and InitializeClientEncoding() will fix
* things once initialization is complete.
*/
if (SetClientEncoding(encoding, doit) < 0)
{
if (interactive)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("conversion between %s and %s is not supported",
value, GetDatabaseEncodingName())));
errmsg("conversion between %s and %s is not supported",
value, GetDatabaseEncodingName())));
return NULL;
}
return value;
@ -758,12 +757,12 @@ assign_session_authorization(const char *value, bool doit, bool interactive)
/* not a saved ID, so look it up */
HeapTuple userTup;

if (! IsTransactionState())
if (!IsTransactionState())
{
/*
* Can't do catalog lookups, so fail. The upshot of this is
* that session_authorization cannot be set in postgresql.conf,
* which seems like a good thing anyway.
* that session_authorization cannot be set in
* postgresql.conf, which seems like a good thing anyway.
*/
return NULL;
}
@ -782,7 +781,7 @@ assign_session_authorization(const char *value, bool doit, bool interactive)

usesysid = ((Form_pg_shadow) GETSTRUCT(userTup))->usesysid;
is_superuser = ((Form_pg_shadow) GETSTRUCT(userTup))->usesuper;

ReleaseSysCache(userTup);
}

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/view.c,v 1.75 2003/08/01 00:15:20 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/view.c,v 1.76 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -190,8 +190,8 @@ checkViewTupleDesc(TupleDesc newdesc, TupleDesc olddesc)
newattr->atttypmod != oldattr->atttypmod)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
errmsg("cannot change datatype of view column \"%s\"",
NameStr(oldattr->attname))));
errmsg("cannot change datatype of view column \"%s\"",
NameStr(oldattr->attname))));
/* We can ignore the remaining attributes of an attribute... */
}

@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/executor/execAmi.c,v 1.72 2003/07/21 17:05:00 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execAmi.c,v 1.73 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -48,7 +48,7 @@
* ----------------------------------------------------------------
*/
void
ExecReScan(PlanState *node, ExprContext *exprCtxt)
ExecReScan(PlanState * node, ExprContext *exprCtxt)
{
/* If collecting timing stats, update them */
if (node->instrument)
@ -61,7 +61,7 @@ ExecReScan(PlanState *node, ExprContext *exprCtxt)

foreach(lst, node->initPlan)
{
SubPlanState *sstate = (SubPlanState *) lfirst(lst);
SubPlanState *sstate = (SubPlanState *) lfirst(lst);
PlanState *splan = sstate->planstate;

if (splan->plan->extParam != NULL) /* don't care about child
@ -72,7 +72,7 @@ ExecReScan(PlanState *node, ExprContext *exprCtxt)
}
foreach(lst, node->subPlan)
{
SubPlanState *sstate = (SubPlanState *) lfirst(lst);
SubPlanState *sstate = (SubPlanState *) lfirst(lst);
PlanState *splan = sstate->planstate;

if (splan->plan->extParam != NULL)
@ -177,7 +177,7 @@ ExecReScan(PlanState *node, ExprContext *exprCtxt)
* Marks the current scan position.
*/
void
ExecMarkPos(PlanState *node)
ExecMarkPos(PlanState * node)
{
switch (nodeTag(node))
{
@ -218,7 +218,7 @@ ExecMarkPos(PlanState *node)
* restores the scan position previously saved with ExecMarkPos()
*/
void
ExecRestrPos(PlanState *node)
ExecRestrPos(PlanState * node)
{
switch (nodeTag(node))
{
@ -302,16 +302,16 @@ ExecSupportsBackwardScan(Plan *node)
return false;

case T_Append:
{
List *l;

foreach(l, ((Append *) node)->appendplans)
{
if (!ExecSupportsBackwardScan((Plan *) lfirst(l)))
return false;
List *l;

foreach(l, ((Append *) node)->appendplans)
{
if (!ExecSupportsBackwardScan((Plan *) lfirst(l)))
return false;
}
return true;
}
return true;
}

case T_SeqScan:
case T_IndexScan:

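[Editor's aside, not part of the commit] The T_Append case reindented above is a capability check folded with AND over the child plans: backward scan is supported only if every appendplan supports it. A minimal sketch of the same recursion over a hypothetical Node type:

#include <stdbool.h>

typedef struct Node
{
	bool		scans_backward;	/* capability of a leaf node */
	struct Node **kids;			/* children of an Append-like node */
	int			nkids;
} Node;

static bool
supports_backward_scan(const Node *n)
{
	if (n->nkids == 0)
		return n->scans_backward;

	for (int i = 0; i < n->nkids; i++)
	{
		/* one child that cannot go backward poisons the whole Append */
		if (!supports_backward_scan(n->kids[i]))
			return false;
	}
	return true;
}
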
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execGrouping.c,v 1.4 2003/07/21 17:05:08 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execGrouping.c,v 1.5 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -249,7 +249,7 @@ execTuplesHashPrepare(TupleDesc tupdesc,
eq_function = oprfuncid(optup);
ReleaseSysCache(optup);
hash_function = get_op_hash_function(eq_opr);
if (!OidIsValid(hash_function)) /* should not happen */
if (!OidIsValid(hash_function)) /* should not happen */
elog(ERROR, "could not find hash function for hash operator %u",
eq_opr);
fmgr_info(eq_function, &(*eqfunctions)[i]);
@ -289,8 +289,8 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx,
int nbuckets, Size entrysize,
MemoryContext tablecxt, MemoryContext tempcxt)
{
TupleHashTable hashtable;
Size tabsize;
TupleHashTable hashtable;
Size tabsize;

Assert(nbuckets > 0);
Assert(entrysize >= sizeof(TupleHashEntryData));
@ -411,9 +411,9 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
* Iterator state must be initialized with ResetTupleHashIterator() macro.
*/
TupleHashEntry
ScanTupleHashTable(TupleHashTable hashtable, TupleHashIterator *state)
ScanTupleHashTable(TupleHashTable hashtable, TupleHashIterator * state)
{
TupleHashEntry entry;

entry = state->next_entry;
while (entry == NULL)

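[Editor's aside, not part of the commit] The ScanTupleHashTable hunk above shows the iterator's core step: take the remembered next entry, and while it is NULL advance to the next nonempty bucket. A self-contained sketch of that bucket-chain walk, with hypothetical Table/Entry/Iter types standing in for the TupleHashTable structures:

#include <stddef.h>

typedef struct Entry
{
	struct Entry *next;			/* chain within one bucket */
	int			key;
} Entry;

typedef struct
{
	Entry	  **buckets;
	int			nbuckets;
} Table;

typedef struct
{
	int			next_bucket;	/* plays the role of TupleHashIterator */
	Entry	   *next_entry;
} Iter;

/* analogous to the ResetTupleHashIterator() macro mentioned above */
static void
iter_reset(Iter *it)
{
	it->next_bucket = 0;
	it->next_entry = NULL;
}

static Entry *
iter_next(const Table *t, Iter *it)
{
	Entry	   *entry = it->next_entry;

	while (entry == NULL)		/* advance to the next nonempty bucket */
	{
		if (it->next_bucket >= t->nbuckets)
			return NULL;		/* whole table visited */
		entry = t->buckets[it->next_bucket++];
	}
	it->next_entry = entry->next;	/* remember our successor */
	return entry;
}

Callers reset once with iter_reset(), then pump iter_next() until it returns NULL.
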
@ -26,7 +26,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.212 2003/08/01 00:15:20 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.213 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -68,7 +68,7 @@ static void initResultRelInfo(ResultRelInfo *resultRelInfo,
Index resultRelationIndex,
List *rangeTable,
CmdType operation);
static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
static TupleTableSlot *ExecutePlan(EState *estate, PlanState * planstate,
CmdType operation,
long numberTuples,
ScanDirection direction,
@ -87,7 +87,7 @@ static void EndEvalPlanQual(EState *estate);
static void ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation);
static void ExecCheckXactReadOnly(Query *parsetree, CmdType operation);
static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
evalPlanQual *priorepq);
evalPlanQual *priorepq);
static void EvalPlanQualStop(evalPlanQual *epq);

/* end of local decls */
@ -100,7 +100,7 @@ static void EvalPlanQualStop(evalPlanQual *epq);
* query plan
*
* Takes a QueryDesc previously created by CreateQueryDesc (it's not real
* clear why we bother to separate the two functions, but...). The tupDesc
* clear why we bother to separate the two functions, but...). The tupDesc
* field of the QueryDesc is filled in to describe the tuples that will be
* returned, and the internal fields (estate and planstate) are set up.
*
@ -122,8 +122,8 @@ ExecutorStart(QueryDesc *queryDesc, bool explainOnly)
Assert(queryDesc->estate == NULL);

/*
* If the transaction is read-only, we need to check if any writes
* are planned to non-temporary tables.
* If the transaction is read-only, we need to check if any writes are
* planned to non-temporary tables.
*/
if (!explainOnly)
ExecCheckXactReadOnly(queryDesc->parsetree, queryDesc->operation);
@ -362,8 +362,8 @@ ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation)

/*
* Otherwise, only plain-relation RTEs need to be checked here.
* Function RTEs are checked by init_fcache when the function is prepared
* for execution. Join and special RTEs need no checks.
* Function RTEs are checked by init_fcache when the function is
* prepared for execution. Join and special RTEs need no checks.
*/
if (rte->rtekind != RTE_RELATION)
return;
@ -435,7 +435,7 @@ ExecCheckXactReadOnly(Query *parsetree, CmdType operation)
if (operation == CMD_DELETE || operation == CMD_INSERT
|| operation == CMD_UPDATE)
{
List *lp;
List *lp;

foreach(lp, parsetree->rtable)
{
@ -474,9 +474,9 @@ static void
InitPlan(QueryDesc *queryDesc, bool explainOnly)
{
CmdType operation = queryDesc->operation;
Query *parseTree = queryDesc->parsetree;
Plan *plan = queryDesc->plantree;
EState *estate = queryDesc->estate;
Query *parseTree = queryDesc->parsetree;
Plan *plan = queryDesc->plantree;
EState *estate = queryDesc->estate;
PlanState *planstate;
List *rangeTable;
Relation intoRelationDesc;
@ -484,8 +484,8 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
TupleDesc tupType;

/*
* Do permissions checks. It's sufficient to examine the query's
* top rangetable here --- subplan RTEs will be checked during
* Do permissions checks. It's sufficient to examine the query's top
* rangetable here --- subplan RTEs will be checked during
* ExecInitSubPlan().
*/
ExecCheckRTPerms(parseTree->rtable, operation);
@ -570,10 +570,11 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
if (operation == CMD_SELECT && parseTree->into != NULL)
{
do_select_into = true;

/*
* For now, always create OIDs in SELECT INTO; this is for backwards
* compatibility with pre-7.3 behavior. Eventually we might want
* to allow the user to choose.
* For now, always create OIDs in SELECT INTO; this is for
* backwards compatibility with pre-7.3 behavior. Eventually we
* might want to allow the user to choose.
*/
estate->es_force_oids = true;
}
@ -640,12 +641,12 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
tupType = ExecGetResultType(planstate);

/*
* Initialize the junk filter if needed. SELECT and INSERT queries need a
* filter if there are any junk attrs in the tlist. INSERT and SELECT
* INTO also need a filter if the top plan node is a scan node that's not
* doing projection (else we'll be scribbling on the scan tuple!) UPDATE
* and DELETE always need a filter, since there's always a junk 'ctid'
* attribute present --- no need to look first.
* Initialize the junk filter if needed. SELECT and INSERT queries
* need a filter if there are any junk attrs in the tlist. INSERT and
* SELECT INTO also need a filter if the top plan node is a scan node
* that's not doing projection (else we'll be scribbling on the scan
* tuple!) UPDATE and DELETE always need a filter, since there's
* always a junk 'ctid' attribute present --- no need to look first.
*/
{
bool junk_filter_needed = false;
@ -752,8 +753,8 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)

/*
* If doing SELECT INTO, initialize the "into" relation. We must wait
* till now so we have the "clean" result tuple type to create the
* new table from.
* till now so we have the "clean" result tuple type to create the new
* table from.
*
* If EXPLAIN, skip creating the "into" relation.
*/
@ -795,16 +796,16 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
FreeTupleDesc(tupdesc);

/*
* Advance command counter so that the newly-created
* relation's catalog tuples will be visible to heap_open.
* Advance command counter so that the newly-created relation's
* catalog tuples will be visible to heap_open.
*/
CommandCounterIncrement();

/*
* If necessary, create a TOAST table for the into
* relation. Note that AlterTableCreateToastTable ends
* with CommandCounterIncrement(), so that the TOAST table
* will be visible for insertion.
* If necessary, create a TOAST table for the into relation. Note
* that AlterTableCreateToastTable ends with
* CommandCounterIncrement(), so that the TOAST table will be
* visible for insertion.
*/
AlterTableCreateToastTable(intoRelationId, true);

@ -841,19 +842,19 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change sequence relation \"%s\"",
RelationGetRelationName(resultRelationDesc))));
RelationGetRelationName(resultRelationDesc))));
break;
case RELKIND_TOASTVALUE:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change toast relation \"%s\"",
RelationGetRelationName(resultRelationDesc))));
RelationGetRelationName(resultRelationDesc))));
break;
case RELKIND_VIEW:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change view relation \"%s\"",
RelationGetRelationName(resultRelationDesc))));
RelationGetRelationName(resultRelationDesc))));
break;
}

@ -894,7 +895,7 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
* ----------------------------------------------------------------
*/
void
ExecEndPlan(PlanState *planstate, EState *estate)
ExecEndPlan(PlanState * planstate, EState *estate)
{
ResultRelInfo *resultRelInfo;
int i;
@ -964,18 +965,18 @@ ExecEndPlan(PlanState *planstate, EState *estate)
*/
static TupleTableSlot *
ExecutePlan(EState *estate,
PlanState *planstate,
PlanState * planstate,
CmdType operation,
long numberTuples,
ScanDirection direction,
DestReceiver *dest)
{
JunkFilter *junkfilter;
TupleTableSlot *slot;
ItemPointer tupleid = NULL;
ItemPointerData tuple_ctid;
long current_tuple_count;
TupleTableSlot *result;
JunkFilter *junkfilter;
TupleTableSlot *slot;
ItemPointer tupleid = NULL;
ItemPointerData tuple_ctid;
long current_tuple_count;
TupleTableSlot *result;

/*
* initialize local variables
@ -1199,7 +1200,7 @@ lnext: ;

/*
* check our tuple count.. if we've processed the proper number
* then quit, else loop again and process more tuples. Zero
* then quit, else loop again and process more tuples. Zero
* numberTuples means no limit.
*/
current_tuple_count++;
@ -1309,7 +1310,7 @@ ExecInsert(TupleTableSlot *slot,

/* BEFORE ROW INSERT Triggers */
if (resultRelInfo->ri_TrigDesc &&
resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
{
HeapTuple newtuple;

@ -1686,13 +1687,13 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("null value for attribute \"%s\" violates NOT NULL constraint",
NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
}
}

if (constr->num_check > 0)
{
const char *failed;
const char *failed;

if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
ereport(ERROR,
@ -1884,10 +1885,11 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
* integrated with the Param mechanism somehow, so that the upper plan
* nodes know that their children's outputs have changed.
*
* Note that the stack of free evalPlanQual nodes is quite useless at
* the moment, since it only saves us from pallocing/releasing the
* Note that the stack of free evalPlanQual nodes is quite useless at the
* moment, since it only saves us from pallocing/releasing the
* evalPlanQual nodes themselves. But it will be useful once we
* implement ReScan instead of end/restart for re-using PlanQual nodes.
* implement ReScan instead of end/restart for re-using PlanQual
* nodes.
*/
if (endNode)
{
@ -1898,10 +1900,11 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
/*
* Initialize new recheck query.
*
* Note: if we were re-using PlanQual plans via ExecReScan, we'd need
* to instead copy down changeable state from the top plan (including
* es_result_relation_info, es_junkFilter) and reset locally changeable
* state in the epq (including es_param_exec_vals, es_evTupleNull).
* Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
* instead copy down changeable state from the top plan (including
* es_result_relation_info, es_junkFilter) and reset locally
* changeable state in the epq (including es_param_exec_vals,
* es_evTupleNull).
*/
EvalPlanQualStart(epq, estate, epq->next);

@ -2016,9 +2019,9 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)

/*
* The epqstates share the top query's copy of unchanging state such
* as the snapshot, rangetable, result-rel info, and external Param info.
* They need their own copies of local state, including a tuple table,
* es_param_exec_vals, etc.
* as the snapshot, rangetable, result-rel info, and external Param
* info. They need their own copies of local state, including a tuple
* table, es_param_exec_vals, etc.
*/
epqstate->es_direction = ForwardScanDirection;
epqstate->es_snapshot = estate->es_snapshot;
@ -2036,11 +2039,11 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
epqstate->es_instrument = estate->es_instrument;
epqstate->es_force_oids = estate->es_force_oids;
epqstate->es_topPlan = estate->es_topPlan;

/*
* Each epqstate must have its own es_evTupleNull state, but
* all the stack entries share es_evTuple state. This allows
* sub-rechecks to inherit the value being examined by an
* outer recheck.
* Each epqstate must have its own es_evTupleNull state, but all the
* stack entries share es_evTuple state. This allows sub-rechecks to
* inherit the value being examined by an outer recheck.
*/
epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
if (priorepq == NULL)

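[Editor's aside, not part of the commit] The EvalPlanQualStart hunks above end on a subtle ownership split: each recheck level allocates its own es_evTupleNull flags, while every level aliases one shared es_evTuple array, so inner rechecks inherit the tuple an outer recheck is examining. A minimal sketch under those assumptions (all names hypothetical, error handling omitted):

#include <stdbool.h>
#include <stdlib.h>

typedef struct RecheckLevel
{
	bool	   *evTupleNull;	/* private to this level */
	void	  **evTuple;		/* one array shared by the whole stack */
} RecheckLevel;

static RecheckLevel *
push_recheck_level(RecheckLevel *outer, int rtsize)
{
	RecheckLevel *lvl = malloc(sizeof(RecheckLevel));

	/* private, zero-initialized flags: palloc0() in the original */
	lvl->evTupleNull = calloc(rtsize, sizeof(bool));

	/* outermost level owns the tuple array; inner levels alias it */
	if (outer == NULL)
		lvl->evTuple = calloc(rtsize, sizeof(void *));
	else
		lvl->evTuple = outer->evTuple;

	return lvl;
}
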
@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execProcnode.c,v 1.37 2003/07/21 17:05:08 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execProcnode.c,v 1.38 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -52,7 +52,7 @@
* * ExecInitNode() notices that it is looking at a nest loop and
* as the code below demonstrates, it calls ExecInitNestLoop().
* Eventually this calls ExecInitNode() on the right and left subplans
* and so forth until the entire plan is initialized. The result
* and so forth until the entire plan is initialized. The result
* of ExecInitNode() is a plan state tree built with the same structure
* as the underlying plan tree.
*
@ -226,7 +226,7 @@ ExecInitNode(Plan *node, EState *estate)
subps = NIL;
foreach(subp, node->initPlan)
{
SubPlan *subplan = (SubPlan *) lfirst(subp);
SubPlan *subplan = (SubPlan *) lfirst(subp);
SubPlanState *sstate;

Assert(IsA(subplan, SubPlan));
@ -237,9 +237,9 @@ ExecInitNode(Plan *node, EState *estate)
result->initPlan = subps;

/*
* Initialize any subPlans present in this node. These were found
* by ExecInitExpr during initialization of the PlanState. Note we
* must do this after initializing initPlans, in case their arguments
* Initialize any subPlans present in this node. These were found by
* ExecInitExpr during initialization of the PlanState. Note we must
* do this after initializing initPlans, in case their arguments
* contain subPlans (is that actually possible? perhaps not).
*/
subps = NIL;
@ -268,7 +268,7 @@ ExecInitNode(Plan *node, EState *estate)
* ----------------------------------------------------------------
*/
TupleTableSlot *
ExecProcNode(PlanState *node)
ExecProcNode(PlanState * node)
{
TupleTableSlot *result;

@ -280,7 +280,7 @@ ExecProcNode(PlanState *node)
if (node == NULL)
return NULL;

if (node->chgParam != NULL) /* something changed */
if (node->chgParam != NULL) /* something changed */
ExecReScan(node, NULL); /* let ReScan handle this */

if (node->instrument)
@ -484,7 +484,7 @@ ExecCountSlotsNode(Plan *node)
* ----------------------------------------------------------------
*/
void
ExecEndNode(PlanState *node)
ExecEndNode(PlanState * node)
{
List *subp;

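[Editor's aside, not part of the commit] The ExecProcNode prologue above encodes a lazy-rescan convention: if chgParam is non-NULL, some parameter feeding this node changed since the last call, so the node is rescanned before producing the next row. A schematic sketch with a hypothetical PlanNode type; here the dispatcher clears the flag itself, whereas the real executor leaves that to ExecReScan:

typedef struct PlanNode PlanNode;

struct PlanNode
{
	void	   *chgParam;		/* non-NULL means our parameters changed */
	void	   *(*produce) (PlanNode *);	/* node-specific worker */
	void		(*rescan) (PlanNode *);
};

static void *
proc_node(PlanNode *node)
{
	if (node == NULL)
		return NULL;

	if (node->chgParam != NULL)	/* something changed */
	{
		node->rescan(node);		/* restart the node before fetching */
		node->chgParam = NULL;
	}

	return node->produce(node);
}
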
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.138 2003/08/01 00:15:21 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.139 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -50,55 +50,55 @@


/* static function decls */
static Datum ExecEvalAggref(AggrefExprState *aggref,
ExprContext *econtext,
bool *isNull);
static Datum ExecEvalArrayRef(ArrayRefExprState *astate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalAggref(AggrefExprState * aggref,
ExprContext *econtext,
bool *isNull);
static Datum ExecEvalArrayRef(ArrayRefExprState * astate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull);
static Datum ExecEvalParam(Param *expression, ExprContext *econtext,
bool *isNull);
static Datum ExecEvalFunc(FuncExprState *fcache, ExprContext *econtext,
bool *isNull);
static Datum ExecEvalFunc(FuncExprState * fcache, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalOper(FuncExprState *fcache, ExprContext *econtext,
static Datum ExecEvalOper(FuncExprState * fcache, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalDistinct(FuncExprState *fcache, ExprContext *econtext,
static Datum ExecEvalDistinct(FuncExprState * fcache, ExprContext *econtext,
bool *isNull);
static Datum ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
ExprContext *econtext, bool *isNull);
static Datum ExecEvalScalarArrayOp(ScalarArrayOpExprState * sstate,
ExprContext *econtext, bool *isNull);
static ExprDoneCond ExecEvalFuncArgs(FunctionCallInfo fcinfo,
List *argList, ExprContext *econtext);
static Datum ExecEvalNot(BoolExprState *notclause, ExprContext *econtext,
bool *isNull);
static Datum ExecEvalOr(BoolExprState *orExpr, ExprContext *econtext,
bool *isNull);
static Datum ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext,
bool *isNull);
static Datum ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
static Datum ExecEvalNot(BoolExprState * notclause, ExprContext *econtext,
bool *isNull);
static Datum ExecEvalOr(BoolExprState * orExpr, ExprContext *econtext,
bool *isNull);
static Datum ExecEvalAnd(BoolExprState * andExpr, ExprContext *econtext,
bool *isNull);
static Datum ExecEvalCase(CaseExprState * caseExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalArray(ArrayExprState *astate,
ExprContext *econtext,
bool *isNull);
static Datum ExecEvalCoalesce(CoalesceExprState *coalesceExpr,
ExprContext *econtext,
bool *isNull);
static Datum ExecEvalNullIf(FuncExprState *nullIfExpr, ExprContext *econtext,
bool *isNull);
static Datum ExecEvalNullTest(GenericExprState *nstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalBooleanTest(GenericExprState *bstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCoerceToDomain(CoerceToDomainState *cstate,
static Datum ExecEvalArray(ArrayExprState * astate,
ExprContext *econtext,
bool *isNull);
static Datum ExecEvalCoalesce(CoalesceExprState * coalesceExpr,
ExprContext *econtext,
bool *isNull);
static Datum ExecEvalNullIf(FuncExprState * nullIfExpr, ExprContext *econtext,
bool *isNull);
static Datum ExecEvalNullTest(GenericExprState * nstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalBooleanTest(GenericExprState * bstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCoerceToDomain(CoerceToDomainState * cstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCoerceToDomainValue(CoerceToDomainValue *conVal,
ExprContext *econtext, bool *isNull);
static Datum ExecEvalFieldSelect(GenericExprState *fstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCoerceToDomainValue(CoerceToDomainValue * conVal,
ExprContext *econtext, bool *isNull);
static Datum ExecEvalFieldSelect(GenericExprState * fstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);


/*----------
@ -127,7 +127,7 @@ static Datum ExecEvalFieldSelect(GenericExprState *fstate,
*----------
*/
static Datum
ExecEvalArrayRef(ArrayRefExprState *astate,
ExecEvalArrayRef(ArrayRefExprState * astate,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@ -301,7 +301,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
* ----------------------------------------------------------------
*/
static Datum
ExecEvalAggref(AggrefExprState *aggref, ExprContext *econtext, bool *isNull)
ExecEvalAggref(AggrefExprState * aggref, ExprContext *econtext, bool *isNull)
{
if (econtext->ecxt_aggvalues == NULL) /* safety check */
elog(ERROR, "no aggregates in this expression context");
@ -382,8 +382,8 @@ ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull)
*
* XXX this is a horrid crock: since the pointer to the slot might live
* longer than the current evaluation context, we are forced to copy
* the tuple and slot into a long-lived context --- we use
* the econtext's per-query memory which should be safe enough. This
* the tuple and slot into a long-lived context --- we use the
* econtext's per-query memory which should be safe enough. This
* represents a serious memory leak if many such tuples are processed
* in one command, however. We ought to redesign the representation
* of whole-tuple datums so that this is not necessary.
@ -439,7 +439,8 @@ ExecEvalParam(Param *expression, ExprContext *econtext, bool *isNull)
{
/*
* PARAM_EXEC params (internal executor parameters) are stored in
* the ecxt_param_exec_vals array, and can be accessed by array index.
* the ecxt_param_exec_vals array, and can be accessed by array
* index.
*/
ParamExecData *prm;

@ -457,9 +458,9 @@ ExecEvalParam(Param *expression, ExprContext *econtext, bool *isNull)
else
{
/*
* All other parameter types must be sought in ecxt_param_list_info.
* NOTE: The last entry in the param array is always an
* entry with kind == PARAM_INVALID.
* All other parameter types must be sought in
* ecxt_param_list_info. NOTE: The last entry in the param array
* is always an entry with kind == PARAM_INVALID.
*/
ParamListInfo paramList = econtext->ecxt_param_list_info;
char *thisParamName = expression->paramname;
@ -488,8 +489,8 @@ ExecEvalParam(Param *expression, ExprContext *econtext, bool *isNull)
}
if (!matchFound)
paramList++;
} /* while */
} /* if */
} /* while */
} /* if */

if (!matchFound)
{
@ -605,7 +606,7 @@ GetAttributeByName(TupleTableSlot *slot, char *attname, bool *isNull)
* init_fcache - initialize a FuncExprState node during first use
*/
void
init_fcache(Oid foid, FuncExprState *fcache, MemoryContext fcacheCxt)
init_fcache(Oid foid, FuncExprState * fcache, MemoryContext fcacheCxt)
{
AclResult aclresult;

@ -678,7 +679,7 @@ ExecEvalFuncArgs(FunctionCallInfo fcinfo,
* Evaluate the arguments to a function and then the function itself.
*/
Datum
ExecMakeFunctionResult(FuncExprState *fcache,
ExecMakeFunctionResult(FuncExprState * fcache,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@ -881,7 +882,7 @@ ExecMakeFunctionResult(FuncExprState *fcache,
* object. (If function returns an empty set, we just return NULL instead.)
*/
Tuplestorestate *
ExecMakeTableFunctionResult(ExprState *funcexpr,
ExecMakeTableFunctionResult(ExprState * funcexpr,
ExprContext *econtext,
TupleDesc expectedDesc,
TupleDesc *returnDesc)
@ -899,14 +900,14 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
bool returnsTuple = false;

/*
* Normally the passed expression tree will be a FuncExprState, since the
* grammar only allows a function call at the top level of a table
* function reference. However, if the function doesn't return set then
* the planner might have replaced the function call via constant-folding
* or inlining. So if we see any other kind of expression node, execute
* it via the general ExecEvalExpr() code; the only difference is that
* we don't get a chance to pass a special ReturnSetInfo to any functions
* buried in the expression.
* Normally the passed expression tree will be a FuncExprState, since
* the grammar only allows a function call at the top level of a table
* function reference. However, if the function doesn't return set
* then the planner might have replaced the function call via
* constant-folding or inlining. So if we see any other kind of
* expression node, execute it via the general ExecEvalExpr() code;
* the only difference is that we don't get a chance to pass a special
* ReturnSetInfo to any functions buried in the expression.
*/
if (funcexpr && IsA(funcexpr, FuncExprState) &&
IsA(funcexpr->expr, FuncExpr))
@ -924,7 +925,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
*/
if (fcache->func.fn_oid == InvalidOid)
{
FuncExpr *func = (FuncExpr *) fcache->xprstate.expr;
FuncExpr *func = (FuncExpr *) fcache->xprstate.expr;

init_fcache(func->funcid, fcache, econtext->ecxt_per_query_memory);
}
@ -933,9 +934,9 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
* Evaluate the function's argument list.
*
* Note: ideally, we'd do this in the per-tuple context, but then the
* argument values would disappear when we reset the context in the
* inner loop. So do it in caller context. Perhaps we should make a
* separate context just to hold the evaluated arguments?
* argument values would disappear when we reset the context in
* the inner loop. So do it in caller context. Perhaps we should
* make a separate context just to hold the evaluated arguments?
*/
MemSet(&fcinfo, 0, sizeof(fcinfo));
fcinfo.flinfo = &(fcache->func);
@ -990,7 +991,8 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
rsinfo.setDesc = NULL;

/*
* Switch to short-lived context for calling the function or expression.
* Switch to short-lived context for calling the function or
* expression.
*/
callerContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);

@ -1004,9 +1006,9 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
HeapTuple tuple;

/*
* reset per-tuple memory context before each call of the
* function or expression. This cleans up any local memory the
* function may leak when called.
* reset per-tuple memory context before each call of the function
* or expression. This cleans up any local memory the function may
* leak when called.
*/
ResetExprContext(econtext);

@ -1157,7 +1159,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
* ----------------------------------------------------------------
*/
static Datum
ExecEvalFunc(FuncExprState *fcache,
ExecEvalFunc(FuncExprState * fcache,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@ -1167,7 +1169,7 @@ ExecEvalFunc(FuncExprState *fcache,
*/
if (fcache->func.fn_oid == InvalidOid)
{
FuncExpr *func = (FuncExpr *) fcache->xprstate.expr;
FuncExpr *func = (FuncExpr *) fcache->xprstate.expr;

init_fcache(func->funcid, fcache, econtext->ecxt_per_query_memory);
}
@ -1180,7 +1182,7 @@ ExecEvalFunc(FuncExprState *fcache,
* ----------------------------------------------------------------
*/
static Datum
ExecEvalOper(FuncExprState *fcache,
ExecEvalOper(FuncExprState * fcache,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@ -1190,7 +1192,7 @@ ExecEvalOper(FuncExprState *fcache,
*/
if (fcache->func.fn_oid == InvalidOid)
{
OpExpr *op = (OpExpr *) fcache->xprstate.expr;
OpExpr *op = (OpExpr *) fcache->xprstate.expr;

init_fcache(op->opfuncid, fcache, econtext->ecxt_per_query_memory);
}
@ -1210,7 +1212,7 @@ ExecEvalOper(FuncExprState *fcache,
* ----------------------------------------------------------------
*/
static Datum
ExecEvalDistinct(FuncExprState *fcache,
ExecEvalDistinct(FuncExprState * fcache,
ExprContext *econtext,
bool *isNull)
{
@ -1242,7 +1244,7 @@ ExecEvalDistinct(FuncExprState *fcache,
if (argDone != ExprSingleResult)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("IS DISTINCT FROM does not support set arguments")));
errmsg("IS DISTINCT FROM does not support set arguments")));
Assert(fcinfo.nargs == 2);

if (fcinfo.argnull[0] && fcinfo.argnull[1])
@ -1272,11 +1274,11 @@ ExecEvalDistinct(FuncExprState *fcache,
*
* Evaluate "scalar op ANY/ALL (array)". The operator always yields boolean,
* and we combine the results across all array elements using OR and AND
* (for ANY and ALL respectively). Of course we short-circuit as soon as
* (for ANY and ALL respectively). Of course we short-circuit as soon as
* the result is known.
*/
static Datum
ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
ExecEvalScalarArrayOp(ScalarArrayOpExprState * sstate,
ExprContext *econtext, bool *isNull)
{
ScalarArrayOpExpr *opexpr = (ScalarArrayOpExpr *) sstate->fxprstate.xprstate.expr;
@ -1310,12 +1312,12 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
if (argDone != ExprSingleResult)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("op ANY/ALL (array) does not support set arguments")));
errmsg("op ANY/ALL (array) does not support set arguments")));
Assert(fcinfo.nargs == 2);

/*
* If the array is NULL then we return NULL --- it's not very meaningful
* to do anything else, even if the operator isn't strict.
* If the array is NULL then we return NULL --- it's not very
* meaningful to do anything else, even if the operator isn't strict.
*/
if (fcinfo.argnull[1])
{
@ -1334,6 +1336,7 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
nitems = ArrayGetNItems(ARR_NDIM(arr), ARR_DIMS(arr));
if (nitems <= 0)
return BoolGetDatum(!useOr);

/*
* If the scalar is NULL, and the function is strict, return NULL.
* This is just to avoid having to test for strictness inside the
@ -1347,8 +1350,8 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
}

/*
* We arrange to look up info about the element type only
* once per series of calls, assuming the element type doesn't change
* We arrange to look up info about the element type only once per
* series of calls, assuming the element type doesn't change
* underneath us.
*/
if (sstate->element_type != ARR_ELEMTYPE(arr))
@ -1370,8 +1373,8 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
s = (char *) ARR_DATA_PTR(arr);
for (i = 0; i < nitems; i++)
{
Datum elt;
Datum thisresult;
Datum elt;
Datum thisresult;

/* Get array element */
elt = fetch_att(s, typbyval, typlen);
@ -1394,7 +1397,7 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
{
result = BoolGetDatum(true);
resultnull = false;
break; /* needn't look at any more elements */
break; /* needn't look at any more elements */
}
}
else
@ -1403,7 +1406,7 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
{
result = BoolGetDatum(false);
resultnull = false;
break; /* needn't look at any more elements */
break; /* needn't look at any more elements */
}
}
}

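[Editor's aside, not part of the commit] The ExecEvalScalarArrayOp hunks above implement the OR/AND fold the header comment describes: ANY short-circuits on the first true, ALL short-circuits on the first false, and a NULL comparison taints the outcome unless a decisive element appears. A reduced sketch over plain ints (negative standing in for a NULL comparison result); all names hypothetical:

#include <stdbool.h>

/*
 * results[i] > 0 means the operator returned true for element i,
 * results[i] == 0 false, and results[i] < 0 a NULL result.
 */
static bool
fold_any_all(const int *results, int nitems, bool useOr, bool *isnull)
{
	*isnull = false;

	for (int i = 0; i < nitems; i++)
	{
		if (results[i] < 0)
		{
			*isnull = true;		/* remember the NULL; keep scanning */
			continue;
		}
		if (useOr && results[i] > 0)
		{
			*isnull = false;	/* a definite true decides ANY */
			return true;
		}
		if (!useOr && results[i] == 0)
		{
			*isnull = false;	/* a definite false decides ALL */
			return false;
		}
	}

	/* no decisive element: identity value, NULL if any element was NULL */
	return !useOr;
}
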
@ -1428,7 +1431,7 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
* ----------------------------------------------------------------
*/
static Datum
ExecEvalNot(BoolExprState *notclause, ExprContext *econtext, bool *isNull)
ExecEvalNot(BoolExprState * notclause, ExprContext *econtext, bool *isNull)
{
ExprState *clause;
Datum expr_value;
@ -1456,7 +1459,7 @@ ExecEvalNot(BoolExprState *notclause, ExprContext *econtext, bool *isNull)
* ----------------------------------------------------------------
*/
static Datum
ExecEvalOr(BoolExprState *orExpr, ExprContext *econtext, bool *isNull)
ExecEvalOr(BoolExprState * orExpr, ExprContext *econtext, bool *isNull)
{
List *clauses;
List *clause;
@ -1504,7 +1507,7 @@ ExecEvalOr(BoolExprState *orExpr, ExprContext *econtext, bool *isNull)
* ----------------------------------------------------------------
*/
static Datum
ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext, bool *isNull)
ExecEvalAnd(BoolExprState * andExpr, ExprContext *econtext, bool *isNull)
{
List *clauses;
List *clause;
@ -1552,7 +1555,7 @@ ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext, bool *isNull)
* ----------------------------------------------------------------
*/
static Datum
ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
ExecEvalCase(CaseExprState * caseExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone)
{
List *clauses;
@ -1610,22 +1613,22 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
* ----------------------------------------------------------------
*/
static Datum
ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
ExecEvalArray(ArrayExprState * astate, ExprContext *econtext,
bool *isNull)
{
ArrayExpr *arrayExpr = (ArrayExpr *) astate->xprstate.expr;
ArrayExpr *arrayExpr = (ArrayExpr *) astate->xprstate.expr;
ArrayType *result;
List *element;
Oid element_type = arrayExpr->element_typeid;
int ndims = arrayExpr->ndims;
int dims[MAXDIM];
int lbs[MAXDIM];
List *element;
Oid element_type = arrayExpr->element_typeid;
int ndims = arrayExpr->ndims;
int dims[MAXDIM];
int lbs[MAXDIM];

if (ndims == 1)
{
int nelems;
Datum *dvalues;
int i = 0;
int nelems;
Datum *dvalues;
int i = 0;

nelems = length(astate->elements);

@ -1683,7 +1686,7 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
/* loop through and get data area from each element */
foreach(element, astate->elements)
{
ExprState *e = (ExprState *) lfirst(element);
ExprState *e = (ExprState *) lfirst(element);
bool eisnull;
Datum arraydatum;
ArrayType *array;
@ -1718,8 +1721,8 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
elem_ndims * sizeof(int)) != 0)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("multidimensional arrays must have array "
"expressions with matching dimensions")));
errmsg("multidimensional arrays must have array "
"expressions with matching dimensions")));
}

elem_ndatabytes = ARR_SIZE(array) - ARR_OVERHEAD(elem_ndims);
@ -1767,16 +1770,16 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
* ----------------------------------------------------------------
*/
static Datum
ExecEvalCoalesce(CoalesceExprState *coalesceExpr, ExprContext *econtext,
ExecEvalCoalesce(CoalesceExprState * coalesceExpr, ExprContext *econtext,
bool *isNull)
{
List *arg;
List *arg;

/* Simply loop through until something NOT NULL is found */
foreach(arg, coalesceExpr->args)
{
ExprState *e = (ExprState *) lfirst(arg);
Datum value;
ExprState *e = (ExprState *) lfirst(arg);
Datum value;

value = ExecEvalExpr(e, econtext, isNull, NULL);
if (!*isNull)
@ -1787,7 +1790,7 @@ ExecEvalCoalesce(CoalesceExprState *coalesceExpr, ExprContext *econtext,
*isNull = true;
return (Datum) 0;
}


/* ----------------------------------------------------------------
* ExecEvalNullIf
*
@ -1797,7 +1800,7 @@ ExecEvalCoalesce(CoalesceExprState *coalesceExpr, ExprContext *econtext,
* ----------------------------------------------------------------
*/
static Datum
ExecEvalNullIf(FuncExprState *fcache, ExprContext *econtext,
ExecEvalNullIf(FuncExprState * fcache, ExprContext *econtext,
bool *isNull)
{
Datum result;
@ -1856,7 +1859,7 @@ ExecEvalNullIf(FuncExprState *fcache, ExprContext *econtext,
* ----------------------------------------------------------------
*/
static Datum
ExecEvalNullTest(GenericExprState *nstate,
ExecEvalNullTest(GenericExprState * nstate,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@ -1901,7 +1904,7 @@ ExecEvalNullTest(GenericExprState *nstate,
* ----------------------------------------------------------------
*/
static Datum
ExecEvalBooleanTest(GenericExprState *bstate,
ExecEvalBooleanTest(GenericExprState * bstate,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@ -1987,7 +1990,7 @@ ExecEvalBooleanTest(GenericExprState *bstate,
* datum) otherwise throw an error.
*/
static Datum
ExecEvalCoerceToDomain(CoerceToDomainState *cstate, ExprContext *econtext,
ExecEvalCoerceToDomain(CoerceToDomainState * cstate, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone)
{
CoerceToDomain *ctest = (CoerceToDomain *) cstate->xprstate.expr;
@ -2009,43 +2012,44 @@ ExecEvalCoerceToDomain(CoerceToDomainState *cstate, ExprContext *econtext,
if (*isNull)
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("domain %s does not allow NULL values",
format_type_be(ctest->resulttype))));
errmsg("domain %s does not allow NULL values",
format_type_be(ctest->resulttype))));
break;
case DOM_CONSTRAINT_CHECK:
{
Datum conResult;
bool conIsNull;
Datum save_datum;
bool save_isNull;
{
Datum conResult;
bool conIsNull;
Datum save_datum;
bool save_isNull;

/*
* Set up value to be returned by CoerceToDomainValue nodes.
* We must save and restore prior setting of econtext's
* domainValue fields, in case this node is itself within
* a check expression for another domain.
*/
save_datum = econtext->domainValue_datum;
save_isNull = econtext->domainValue_isNull;
/*
* Set up value to be returned by CoerceToDomainValue
* nodes. We must save and restore prior setting of
* econtext's domainValue fields, in case this node is
* itself within a check expression for another
* domain.
*/
save_datum = econtext->domainValue_datum;
save_isNull = econtext->domainValue_isNull;

econtext->domainValue_datum = result;
econtext->domainValue_isNull = *isNull;
econtext->domainValue_datum = result;
econtext->domainValue_isNull = *isNull;

conResult = ExecEvalExpr(con->check_expr,
econtext, &conIsNull, NULL);
conResult = ExecEvalExpr(con->check_expr,
econtext, &conIsNull, NULL);

if (!conIsNull &&
!DatumGetBool(conResult))
ereport(ERROR,
(errcode(ERRCODE_CHECK_VIOLATION),
errmsg("value for domain %s violates CHECK constraint \"%s\"",
format_type_be(ctest->resulttype),
con->name)));
econtext->domainValue_datum = save_datum;
econtext->domainValue_isNull = save_isNull;
if (!conIsNull &&
!DatumGetBool(conResult))
ereport(ERROR,
(errcode(ERRCODE_CHECK_VIOLATION),
errmsg("value for domain %s violates CHECK constraint \"%s\"",
format_type_be(ctest->resulttype),
con->name)));
econtext->domainValue_datum = save_datum;
econtext->domainValue_isNull = save_isNull;

break;
}
break;
}
break;
}
default:
elog(ERROR, "unrecognized constraint type: %d",
(int) con->constrainttype);

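[Editor's aside, not part of the commit] The DOM_CONSTRAINT_CHECK block reflowed above turns on a save/set/evaluate/restore discipline: the prior domainValue is saved before being overwritten, because this coercion may itself sit inside another domain's CHECK expression. A minimal sketch of the same discipline, with a hypothetical Ctx and a stub standing in for the CHECK expression:

#include <stdbool.h>

typedef struct Ctx
{
	int			domain_value;	/* like econtext->domainValue_datum */
	bool		domain_isnull;
} Ctx;

/* stub standing in for evaluating the domain's CHECK expression */
static bool
eval_check(Ctx *ctx)
{
	return ctx->domain_isnull || ctx->domain_value >= 0;
}

static bool
check_domain(Ctx *ctx, int value, bool isnull)
{
	/* save the prior setting: we may be inside another domain's check */
	int			save_value = ctx->domain_value;
	bool		save_isnull = ctx->domain_isnull;
	bool		ok;

	ctx->domain_value = value;
	ctx->domain_isnull = isnull;

	ok = eval_check(ctx);		/* the CHECK expr reads ctx->domain_value */

	/* restore on the way out, exactly as saved */
	ctx->domain_value = save_value;
	ctx->domain_isnull = save_isnull;

	return ok;
}
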
@ -2063,7 +2067,7 @@ ExecEvalCoerceToDomain(CoerceToDomainState *cstate, ExprContext *econtext,
* Return the value stored by CoerceToDomain.
*/
static Datum
ExecEvalCoerceToDomainValue(CoerceToDomainValue *conVal,
ExecEvalCoerceToDomainValue(CoerceToDomainValue * conVal,
ExprContext *econtext, bool *isNull)
{
*isNull = econtext->domainValue_isNull;
@ -2077,7 +2081,7 @@ ExecEvalCoerceToDomainValue(CoerceToDomainValue *conVal,
* ----------------------------------------------------------------
*/
static Datum
ExecEvalFieldSelect(GenericExprState *fstate,
ExecEvalFieldSelect(GenericExprState * fstate,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@ -2141,7 +2145,7 @@ ExecEvalFieldSelect(GenericExprState *fstate,
* ----------------------------------------------------------------
*/
Datum
ExecEvalExpr(ExprState *expression,
ExecEvalExpr(ExprState * expression,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@ -2308,7 +2312,7 @@ ExecEvalExpr(ExprState *expression,
* Same as above, but get into the right allocation context explicitly.
*/
Datum
ExecEvalExprSwitchContext(ExprState *expression,
ExecEvalExprSwitchContext(ExprState * expression,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@ -2327,7 +2331,7 @@ ExecEvalExprSwitchContext(ExprState *expression,
* ExecInitExpr: prepare an expression tree for execution
*
* This function builds and returns an ExprState tree paralleling the given
* Expr node tree. The ExprState tree can then be handed to ExecEvalExpr
* Expr node tree. The ExprState tree can then be handed to ExecEvalExpr
* for execution. Because the Expr tree itself is read-only as far as
* ExecInitExpr and ExecEvalExpr are concerned, several different executions
* of the same plan tree can occur concurrently.
@ -2337,7 +2341,7 @@ ExecEvalExprSwitchContext(ExprState *expression,
* the same as the per-query context of the associated ExprContext.
*
* Any Aggref and SubPlan nodes found in the tree are added to the lists
* of such nodes held by the parent PlanState. Otherwise, we do very little
* of such nodes held by the parent PlanState. Otherwise, we do very little
* initialization here other than building the state-node tree. Any nontrivial
* work associated with initializing runtime info for a node should happen
* during the first actual evaluation of that node. (This policy lets us
@ -2356,7 +2360,7 @@ ExecEvalExprSwitchContext(ExprState *expression,
* This case should usually come through ExecPrepareExpr, not directly here.
*/
ExprState *
ExecInitExpr(Expr *node, PlanState *parent)
ExecInitExpr(Expr *node, PlanState * parent)
{
ExprState *state;

@ -2373,7 +2377,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_Aggref:
{
Aggref *aggref = (Aggref *) node;
Aggref *aggref = (Aggref *) node;
AggrefExprState *astate = makeNode(AggrefExprState);

if (parent && IsA(parent, AggState))
@ -2389,8 +2393,8 @@ ExecInitExpr(Expr *node, PlanState *parent)
/*
* Complain if the aggregate's argument contains any
* aggregates; nested agg functions are semantically
* nonsensical. (This should have been caught earlier,
* but we defend against it here anyway.)
* nonsensical. (This should have been caught
* earlier, but we defend against it here anyway.)
*/
if (naggs != aggstate->numaggs)
ereport(ERROR,
@ -2433,41 +2437,41 @@ ExecInitExpr(Expr *node, PlanState *parent)

fstate->args = (List *)
ExecInitExpr((Expr *) funcexpr->args, parent);
fstate->func.fn_oid = InvalidOid; /* not initialized */
fstate->func.fn_oid = InvalidOid; /* not initialized */
state = (ExprState *) fstate;
}
break;
case T_OpExpr:
{
OpExpr *opexpr = (OpExpr *) node;
OpExpr *opexpr = (OpExpr *) node;
FuncExprState *fstate = makeNode(FuncExprState);

fstate->args = (List *)
ExecInitExpr((Expr *) opexpr->args, parent);
fstate->func.fn_oid = InvalidOid; /* not initialized */
fstate->func.fn_oid = InvalidOid; /* not initialized */
state = (ExprState *) fstate;
}
break;
case T_DistinctExpr:
{
DistinctExpr *distinctexpr = (DistinctExpr *) node;
DistinctExpr *distinctexpr = (DistinctExpr *) node;
FuncExprState *fstate = makeNode(FuncExprState);

fstate->args = (List *)
ExecInitExpr((Expr *) distinctexpr->args, parent);
fstate->func.fn_oid = InvalidOid; /* not initialized */
fstate->func.fn_oid = InvalidOid; /* not initialized */
state = (ExprState *) fstate;
}
break;
case T_ScalarArrayOpExpr:
{
ScalarArrayOpExpr *opexpr = (ScalarArrayOpExpr *) node;
ScalarArrayOpExpr *opexpr = (ScalarArrayOpExpr *) node;
ScalarArrayOpExprState *sstate = makeNode(ScalarArrayOpExprState);

sstate->fxprstate.args = (List *)
ExecInitExpr((Expr *) opexpr->args, parent);
sstate->fxprstate.func.fn_oid = InvalidOid; /* not initialized */
sstate->element_type = InvalidOid; /* ditto */
sstate->fxprstate.func.fn_oid = InvalidOid; /* not initialized */
sstate->element_type = InvalidOid; /* ditto */
state = (ExprState *) sstate;
}
break;
@ -2484,7 +2488,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
case T_SubPlan:
{
/* Keep this in sync with ExecInitExprInitPlan, below */
SubPlan *subplan = (SubPlan *) node;
SubPlan *subplan = (SubPlan *) node;
SubPlanState *sstate = makeNode(SubPlanState);

if (!parent)
@ -2492,7 +2496,8 @@ ExecInitExpr(Expr *node, PlanState *parent)

/*
* Here we just add the SubPlanState nodes to
* parent->subPlan. The subplans will be initialized later.
* parent->subPlan. The subplans will be initialized
* later.
*/
parent->subPlan = lcons(sstate, parent->subPlan);
sstate->sub_estate = NULL;
@ -2508,7 +2513,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_FieldSelect:
{
FieldSelect *fselect = (FieldSelect *) node;
FieldSelect *fselect = (FieldSelect *) node;
GenericExprState *gstate = makeNode(GenericExprState);

gstate->arg = ExecInitExpr(fselect->arg, parent);
@ -2517,7 +2522,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_RelabelType:
{
RelabelType *relabel = (RelabelType *) node;
RelabelType *relabel = (RelabelType *) node;
GenericExprState *gstate = makeNode(GenericExprState);

gstate->arg = ExecInitExpr(relabel->arg, parent);
@ -2552,10 +2557,10 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_ArrayExpr:
{
ArrayExpr *arrayexpr = (ArrayExpr *) node;
ArrayExpr *arrayexpr = (ArrayExpr *) node;
ArrayExprState *astate = makeNode(ArrayExprState);
FastList outlist;
List *inlist;
FastList outlist;
List *inlist;

FastListInit(&outlist);
foreach(inlist, arrayexpr->elements)
@ -2585,8 +2590,8 @@ ExecInitExpr(Expr *node, PlanState *parent)
FastListInit(&outlist);
foreach(inlist, coalesceexpr->args)
{
Expr *e = (Expr *) lfirst(inlist);
ExprState *estate;
Expr *e = (Expr *) lfirst(inlist);
ExprState *estate;

estate = ExecInitExpr(e, parent);
FastAppend(&outlist, estate);
@ -2602,7 +2607,7 @@ ExecInitExpr(Expr *node, PlanState *parent)

fstate->args = (List *)
ExecInitExpr((Expr *) nullifexpr->args, parent);
fstate->func.fn_oid = InvalidOid; /* not initialized */
fstate->func.fn_oid = InvalidOid; /* not initialized */
state = (ExprState *) fstate;
}
break;
@ -2617,7 +2622,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_BooleanTest:
{
BooleanTest *btest = (BooleanTest *) node;
BooleanTest *btest = (BooleanTest *) node;
GenericExprState *gstate = makeNode(GenericExprState);

gstate->arg = ExecInitExpr(btest->arg, parent);
@ -2626,7 +2631,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_CoerceToDomain:
{
CoerceToDomain *ctest = (CoerceToDomain *) node;
CoerceToDomain *ctest = (CoerceToDomain *) node;
CoerceToDomainState *cstate = makeNode(CoerceToDomainState);

cstate->arg = ExecInitExpr(ctest->arg, parent);
@ -2636,7 +2641,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_TargetEntry:
{
TargetEntry *tle = (TargetEntry *) node;
TargetEntry *tle = (TargetEntry *) node;
GenericExprState *gstate = makeNode(GenericExprState);

gstate->arg = ExecInitExpr(tle->expr, parent);
@ -2673,12 +2678,12 @@ ExecInitExpr(Expr *node, PlanState *parent)

/*
* ExecInitExprInitPlan --- initialize a subplan expr that's being handled
* as an InitPlan. This is identical to ExecInitExpr's handling of a regular
* as an InitPlan. This is identical to ExecInitExpr's handling of a regular
* subplan expr, except we do NOT want to add the node to the parent's
* subplan list.
*/
SubPlanState *
ExecInitExprInitPlan(SubPlan *node, PlanState *parent)
ExecInitExprInitPlan(SubPlan *node, PlanState * parent)
{
SubPlanState *sstate = makeNode(SubPlanState);

@ -2704,7 +2709,7 @@ ExecInitExprInitPlan(SubPlan *node, PlanState *parent)
* This differs from ExecInitExpr in that we don't assume the caller is
* already running in the EState's per-query context. Also, we apply
* fix_opfuncids() to the passed expression tree to be sure it is ready
* to run. (In ordinary Plan trees the planner will have fixed opfuncids,
* to run. (In ordinary Plan trees the planner will have fixed opfuncids,
* but callers outside the executor will not have done this.)
*/
ExprState *
@ -2988,8 +2993,8 @@ ExecTargetList(List *targetlist,
if (itemIsDone[resind] == ExprEndResult)
{
/*
* Oh dear, this item is returning an empty
* set. Guess we can't make a tuple after all.
* Oh dear, this item is returning an empty set.
* Guess we can't make a tuple after all.
*/
*isDone = ExprEndResult;
break;

@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.23 2003/02/03 15:07:07 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.24 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -45,7 +45,7 @@ static bool tlist_matches_tupdesc(List *tlist, Index varno, TupleDesc tupdesc);
* ----------------------------------------------------------------
*/
TupleTableSlot *
ExecScan(ScanState *node,
ExecScan(ScanState * node,
ExecScanAccessMtd accessMtd) /* function returning a tuple */
{
EState *estate;
@ -134,9 +134,10 @@ ExecScan(ScanState *node,
if (projInfo)
{
/*
* Form a projection tuple, store it in the result tuple slot
* and return it --- unless we find we can project no tuples
* from this scan tuple, in which case continue scan.
* Form a projection tuple, store it in the result tuple
* slot and return it --- unless we find we can project no
* tuples from this scan tuple, in which case continue
* scan.
*/
resultSlot = ExecProject(projInfo, &isDone);
if (isDone != ExprEndResult)
@ -175,13 +176,13 @@ ExecScan(ScanState *node,
* ExecAssignScanType must have been called already.
*/
void
ExecAssignScanProjectionInfo(ScanState *node)
ExecAssignScanProjectionInfo(ScanState * node)
{
Scan *scan = (Scan *) node->ps.plan;
Scan *scan = (Scan *) node->ps.plan;

if (tlist_matches_tupdesc(scan->plan.targetlist,
scan->scanrelid,
node->ss_ScanTupleSlot->ttc_tupleDescriptor))
node->ss_ScanTupleSlot->ttc_tupleDescriptor))
node->ps.ps_ProjInfo = NULL;
else
ExecAssignProjectionInfo(&node->ps);
@ -190,13 +191,13 @@ ExecAssignScanProjectionInfo(ScanState *node)
static bool
tlist_matches_tupdesc(List *tlist, Index varno, TupleDesc tupdesc)
{
int numattrs = tupdesc->natts;
int attrno;
int numattrs = tupdesc->natts;
int attrno;

for (attrno = 1; attrno <= numattrs; attrno++)
{
Form_pg_attribute att_tup = tupdesc->attrs[attrno - 1];
Var *var;
Var *var;

if (tlist == NIL)
return false; /* tlist too short */

@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.67 2003/07/21 17:05:09 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.68 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -482,7 +482,7 @@ ExecSetSlotDescriptorIsNew(TupleTableSlot *slot, /* slot to change */
* ----------------
*/
void
ExecInitResultTupleSlot(EState *estate, PlanState *planstate)
ExecInitResultTupleSlot(EState *estate, PlanState * planstate)
{
INIT_SLOT_DEFS;
INIT_SLOT_ALLOC;
@ -494,7 +494,7 @@ ExecInitResultTupleSlot(EState *estate, PlanState *planstate)
* ----------------
*/
void
ExecInitScanTupleSlot(EState *estate, ScanState *scanstate)
ExecInitScanTupleSlot(EState *estate, ScanState * scanstate)
{
INIT_SLOT_DEFS;
INIT_SLOT_ALLOC;
@ -807,7 +807,7 @@ do_text_output_multiline(TupOutputState *tstate, char *text)
if (eol)
*eol++ = '\0';
else
eol = text + strlen(text);
eol = text +strlen(text);

do_tup_output(tstate, &text);
text = eol;

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.100 2003/05/28 16:03:56 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.101 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -166,8 +166,8 @@ CreateExecutorState(void)
ALLOCSET_DEFAULT_MAXSIZE);

/*
* Make the EState node within the per-query context. This way,
* we don't need a separate pfree() operation for it at shutdown.
* Make the EState node within the per-query context. This way, we
* don't need a separate pfree() operation for it at shutdown.
*/
oldcontext = MemoryContextSwitchTo(qcontext);

@ -248,6 +248,7 @@ FreeExecutorState(EState *estate)
FreeExprContext((ExprContext *) lfirst(estate->es_exprcontexts));
/* FreeExprContext removed the list link for us */
}

/*
* Free the per-query memory context, thereby releasing all working
* memory, including the EState node itself.
@ -310,10 +311,10 @@ CreateExprContext(EState *estate)
econtext->ecxt_callbacks = NULL;

/*
* Link the ExprContext into the EState to ensure it is shut down
* when the EState is freed. Because we use lcons(), shutdowns will
* occur in reverse order of creation, which may not be essential
* but can't hurt.
* Link the ExprContext into the EState to ensure it is shut down when
* the EState is freed. Because we use lcons(), shutdowns will occur
* in reverse order of creation, which may not be essential but can't
* hurt.
*/
estate->es_exprcontexts = lcons(econtext, estate->es_exprcontexts);

@ -377,14 +378,14 @@ MakePerTupleExprContext(EState *estate)
/* ----------------
* ExecAssignExprContext
*
* This initializes the ps_ExprContext field. It is only necessary
* This initializes the ps_ExprContext field. It is only necessary
* to do this for nodes which use ExecQual or ExecProject
* because those routines require an econtext. Other nodes that
* because those routines require an econtext. Other nodes that
* don't have to evaluate expressions don't need to do this.
* ----------------
*/
void
ExecAssignExprContext(EState *estate, PlanState *planstate)
ExecAssignExprContext(EState *estate, PlanState * planstate)
{
planstate->ps_ExprContext = CreateExprContext(estate);
}
@ -394,7 +395,7 @@ ExecAssignExprContext(EState *estate, PlanState *planstate)
* ----------------
*/
void
ExecAssignResultType(PlanState *planstate,
ExecAssignResultType(PlanState * planstate,
TupleDesc tupDesc, bool shouldFree)
{
TupleTableSlot *slot = planstate->ps_ResultTupleSlot;
@ -407,7 +408,7 @@ ExecAssignResultType(PlanState *planstate,
* ----------------
*/
void
ExecAssignResultTypeFromOuterPlan(PlanState *planstate)
ExecAssignResultTypeFromOuterPlan(PlanState * planstate)
{
PlanState *outerPlan;
TupleDesc tupDesc;
@ -423,7 +424,7 @@ ExecAssignResultTypeFromOuterPlan(PlanState *planstate)
* ----------------
*/
void
ExecAssignResultTypeFromTL(PlanState *planstate)
ExecAssignResultTypeFromTL(PlanState * planstate)
{
bool hasoid = false;
TupleDesc tupDesc;
@ -445,9 +446,9 @@ ExecAssignResultTypeFromTL(PlanState *planstate)
* each of the child plans of the topmost Append plan. So, this is
* ugly but it works, for now ...
*
* SELECT INTO is also pretty grotty, because we don't yet have the
* INTO relation's descriptor at this point; we have to look aside
* at a flag set by InitPlan().
* SELECT INTO is also pretty grotty, because we don't yet have the INTO
* relation's descriptor at this point; we have to look aside at a
* flag set by InitPlan().
*/
if (planstate->state->es_force_oids)
hasoid = true;
@ -465,9 +466,9 @@ ExecAssignResultTypeFromTL(PlanState *planstate)
}

/*
* ExecTypeFromTL needs the parse-time representation of the tlist, not
* a list of ExprStates. This is good because some plan nodes don't
* bother to set up planstate->targetlist ...
* ExecTypeFromTL needs the parse-time representation of the tlist,
* not a list of ExprStates. This is good because some plan nodes
* don't bother to set up planstate->targetlist ...
*/
tupDesc = ExecTypeFromTL(planstate->plan->targetlist, hasoid);
ExecAssignResultType(planstate, tupDesc, true);
@ -478,7 +479,7 @@ ExecAssignResultTypeFromTL(PlanState *planstate)
* ----------------
*/
TupleDesc
ExecGetResultType(PlanState *planstate)
ExecGetResultType(PlanState * planstate)
{
TupleTableSlot *slot = planstate->ps_ResultTupleSlot;

@ -524,7 +525,7 @@ ExecBuildProjectionInfo(List *targetList,
* ----------------
*/
void
ExecAssignProjectionInfo(PlanState *planstate)
ExecAssignProjectionInfo(PlanState * planstate)
{
planstate->ps_ProjInfo =
ExecBuildProjectionInfo(planstate->targetlist,
@ -543,7 +544,7 @@ ExecAssignProjectionInfo(PlanState *planstate)
* ----------------
*/
void
ExecFreeExprContext(PlanState *planstate)
ExecFreeExprContext(PlanState * planstate)
{
ExprContext *econtext;

@ -575,7 +576,7 @@ ExecFreeExprContext(PlanState *planstate)
* ----------------
*/
TupleDesc
ExecGetScanType(ScanState *scanstate)
ExecGetScanType(ScanState * scanstate)
{
TupleTableSlot *slot = scanstate->ss_ScanTupleSlot;

@ -587,7 +588,7 @@ ExecGetScanType(ScanState *scanstate)
* ----------------
*/
void
ExecAssignScanType(ScanState *scanstate,
ExecAssignScanType(ScanState * scanstate,
TupleDesc tupDesc, bool shouldFree)
{
TupleTableSlot *slot = scanstate->ss_ScanTupleSlot;
@ -600,7 +601,7 @@ ExecAssignScanType(ScanState *scanstate,
* ----------------
*/
void
ExecAssignScanTypeFromOuterPlan(ScanState *scanstate)
ExecAssignScanTypeFromOuterPlan(ScanState * scanstate)
{
PlanState *outerPlan;
TupleDesc tupDesc;
@ -795,8 +796,8 @@ ExecInsertIndexTuples(TupleTableSlot *slot,

/*
* We will use the EState's per-tuple context for evaluating
* predicates and index expressions (creating it if it's not
* already there).
* predicates and index expressions (creating it if it's not already
* there).
*/
econtext = GetPerTupleExprContext(estate);

@ -841,8 +842,8 @@ ExecInsertIndexTuples(TupleTableSlot *slot,

/*
* FormIndexDatum fills in its datum and null parameters with
* attribute information taken from the given heap tuple.
* It also computes any expressions needed.
* attribute information taken from the given heap tuple. It also
* computes any expressions needed.
*/
FormIndexDatum(indexInfo,
heapTuple,
@ -878,7 +879,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
* Add changed parameters to a plan node's chgParam set
*/
void
UpdateChangedParamSet(PlanState *node, Bitmapset *newchg)
UpdateChangedParamSet(PlanState * node, Bitmapset * newchg)
{
Bitmapset *parmset;

@ -887,6 +888,7 @@ UpdateChangedParamSet(PlanState *node, Bitmapset *newchg)
* Don't include anything else into its chgParam set.
*/
parmset = bms_intersect(node->plan->allParam, newchg);

/*
* Keep node->chgParam == NULL if there's not actually any members;
* this allows the simplest possible tests in executor node files.

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.69 2003/07/28 18:33:18 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.70 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -29,8 +29,8 @@


/*
* We have an execution_state record for each query in a function. Each
* record contains a querytree and plantree for its query. If the query
* We have an execution_state record for each query in a function. Each
* record contains a querytree and plantree for its query. If the query
* is currently in F_EXEC_RUN state then there's a QueryDesc too.
*/
typedef enum
@ -83,7 +83,7 @@ static void postquel_start(execution_state *es, SQLFunctionCachePtr fcache);
static TupleTableSlot *postquel_getnext(execution_state *es);
static void postquel_end(execution_state *es);
static void postquel_sub_params(SQLFunctionCachePtr fcache,
FunctionCallInfo fcinfo);
FunctionCallInfo fcinfo);
static Datum postquel_execute(execution_state *es,
FunctionCallInfo fcinfo,
SQLFunctionCachePtr fcache);
@ -177,11 +177,11 @@ init_sql_fcache(FmgrInfo *finfo)
if (rettype == ANYARRAYOID || rettype == ANYELEMENTOID)
{
rettype = get_fn_expr_rettype(finfo);
if (rettype == InvalidOid) /* this probably should not happen */
if (rettype == InvalidOid) /* this probably should not happen */
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("could not determine actual result type for function declared %s",
format_type_be(procedureStruct->prorettype))));
format_type_be(procedureStruct->prorettype))));
}

/* Now look up the actual result type */
@ -226,7 +226,7 @@ init_sql_fcache(FmgrInfo *finfo)
fcache->funcSlot = NULL;

/*
* Parse and plan the queries. We need the argument type info to pass
* Parse and plan the queries. We need the argument type info to pass
* to the parser.
*/
nargs = procedureStruct->pronargs;
@ -234,7 +234,7 @@ init_sql_fcache(FmgrInfo *finfo)

if (nargs > 0)
{
int argnum;
int argnum;

argOidVect = (Oid *) palloc(nargs * sizeof(Oid));
memcpy(argOidVect,
@ -243,7 +243,7 @@ init_sql_fcache(FmgrInfo *finfo)
/* Resolve any polymorphic argument types */
for (argnum = 0; argnum < nargs; argnum++)
{
Oid argtype = argOidVect[argnum];
Oid argtype = argOidVect[argnum];

if (argtype == ANYARRAYOID || argtype == ANYELEMENTOID)
{
@ -309,7 +309,7 @@ postquel_getnext(execution_state *es)

/*
* If it's the function's last command, and it's a SELECT, fetch one
* row at a time so we can return the results. Otherwise just run it
* row at a time so we can return the results. Otherwise just run it
* to completion.
*/
if (LAST_POSTQUEL_COMMAND(es) && es->qd->operation == CMD_SELECT)
@ -655,14 +655,14 @@ sql_exec_error_callback(void *arg)
/*
* Try to determine where in the function we failed. If there is a
* query with non-null QueryDesc, finger it. (We check this rather
* than looking for F_EXEC_RUN state, so that errors during ExecutorStart
* or ExecutorEnd are blamed on the appropriate query; see postquel_start
* and postquel_end.)
* than looking for F_EXEC_RUN state, so that errors during
* ExecutorStart or ExecutorEnd are blamed on the appropriate query;
* see postquel_start and postquel_end.)
*/
if (fcache)
{
execution_state *es;
int query_num;
int query_num;

es = fcache->func_state;
query_num = 1;

@ -45,7 +45,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.112 2003/08/01 00:15:21 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.113 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -173,12 +173,12 @@ typedef struct AggStatePerGroupData
* later input value. Only the first non-NULL input will be
* auto-substituted.
*/
} AggStatePerGroupData;
} AggStatePerGroupData;

/*
* To implement hashed aggregation, we need a hashtable that stores a
* representative tuple and an array of AggStatePerGroup structs for each
* distinct set of GROUP BY column values. We compute the hash key from
* distinct set of GROUP BY column values. We compute the hash key from
* the GROUP BY columns.
*/
typedef struct AggHashEntryData *AggHashEntry;
@ -188,27 +188,27 @@ typedef struct AggHashEntryData
TupleHashEntryData shared; /* common header for hash table entries */
/* per-aggregate transition status array - must be last! */
AggStatePerGroupData pergroup[1]; /* VARIABLE LENGTH ARRAY */
} AggHashEntryData; /* VARIABLE LENGTH STRUCT */
} AggHashEntryData; /* VARIABLE LENGTH STRUCT */


static void initialize_aggregates(AggState *aggstate,
AggStatePerAgg peragg,
AggStatePerGroup pergroup);
AggStatePerAgg peragg,
AggStatePerGroup pergroup);
static void advance_transition_function(AggState *aggstate,
AggStatePerAgg peraggstate,
AggStatePerGroup pergroupstate,
Datum newVal, bool isNull);
AggStatePerAgg peraggstate,
AggStatePerGroup pergroupstate,
Datum newVal, bool isNull);
static void advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup);
static void process_sorted_aggregate(AggState *aggstate,
AggStatePerAgg peraggstate,
AggStatePerGroup pergroupstate);
AggStatePerAgg peraggstate,
AggStatePerGroup pergroupstate);
static void finalize_aggregate(AggState *aggstate,
AggStatePerAgg peraggstate,
AggStatePerGroup pergroupstate,
Datum *resultVal, bool *resultIsNull);
AggStatePerAgg peraggstate,
AggStatePerGroup pergroupstate,
Datum *resultVal, bool *resultIsNull);
static void build_hash_table(AggState *aggstate);
static AggHashEntry lookup_hash_entry(AggState *aggstate,
TupleTableSlot *slot);
TupleTableSlot *slot);
static TupleTableSlot *agg_retrieve_direct(AggState *aggstate);
static void agg_fill_hash_table(AggState *aggstate);
static TupleTableSlot *agg_retrieve_hash_table(AggState *aggstate);
@ -231,7 +231,7 @@ initialize_aggregates(AggState *aggstate,
{
AggStatePerAgg peraggstate = &peragg[aggno];
AggStatePerGroup pergroupstate = &pergroup[aggno];
Aggref *aggref = peraggstate->aggref;
Aggref *aggref = peraggstate->aggref;

/*
* Start a fresh sort operation for each DISTINCT aggregate.
@ -265,18 +265,18 @@ initialize_aggregates(AggState *aggstate,

oldContext = MemoryContextSwitchTo(aggstate->aggcontext);
pergroupstate->transValue = datumCopy(peraggstate->initValue,
peraggstate->transtypeByVal,
peraggstate->transtypeLen);
peraggstate->transtypeByVal,
peraggstate->transtypeLen);
MemoryContextSwitchTo(oldContext);
}
pergroupstate->transValueIsNull = peraggstate->initValueIsNull;

/*
* If the initial value for the transition state doesn't exist in the
* pg_aggregate table then we will let the first non-NULL value
* returned from the outer procNode become the initial value. (This is
* useful for aggregates like max() and min().) The noTransValue flag
* signals that we still need to do this.
* If the initial value for the transition state doesn't exist in
* the pg_aggregate table then we will let the first non-NULL
* value returned from the outer procNode become the initial
* value. (This is useful for aggregates like max() and min().)
* The noTransValue flag signals that we still need to do this.
*/
pergroupstate->noTransValue = peraggstate->initValueIsNull;
}
@ -299,8 +299,8 @@ advance_transition_function(AggState *aggstate,
if (peraggstate->transfn.fn_strict)
{
/*
* For a strict transfn, nothing happens at a NULL input
* tuple; we just keep the prior transValue.
* For a strict transfn, nothing happens at a NULL input tuple; we
* just keep the prior transValue.
*/
if (isNull)
return;
@ -314,12 +314,13 @@ advance_transition_function(AggState *aggstate,
* here is OK.)
*
* We must copy the datum into aggcontext if it is pass-by-ref.
* We do not need to pfree the old transValue, since it's NULL.
* We do not need to pfree the old transValue, since it's
* NULL.
*/
oldContext = MemoryContextSwitchTo(aggstate->aggcontext);
pergroupstate->transValue = datumCopy(newVal,
peraggstate->transtypeByVal,
peraggstate->transtypeLen);
peraggstate->transtypeByVal,
peraggstate->transtypeLen);
pergroupstate->transValueIsNull = false;
pergroupstate->noTransValue = false;
MemoryContextSwitchTo(oldContext);
@ -363,12 +364,12 @@ advance_transition_function(AggState *aggstate,
newVal = FunctionCallInvoke(&fcinfo);

/*
* If pass-by-ref datatype, must copy the new value into aggcontext and
* pfree the prior transValue. But if transfn returned a pointer to its
* first input, we don't need to do anything.
* If pass-by-ref datatype, must copy the new value into aggcontext
* and pfree the prior transValue. But if transfn returned a pointer
* to its first input, we don't need to do anything.
*/
if (!peraggstate->transtypeByVal &&
DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
{
if (!fcinfo.isnull)
{
@ -388,7 +389,7 @@ advance_transition_function(AggState *aggstate,
}

/*
* Advance all the aggregates for one input tuple. The input tuple
* Advance all the aggregates for one input tuple. The input tuple
* has been stored in tmpcontext->ecxt_scantuple, so that it is accessible
* to ExecEvalExpr. pergroup is the array of per-group structs to use
* (this might be in a hashtable entry).
@ -467,8 +468,8 @@ process_sorted_aggregate(AggState *aggstate,
continue;

/*
* Clear and select the working context for evaluation of
* the equality function and transition function.
* Clear and select the working context for evaluation of the
* equality function and transition function.
*/
MemoryContextReset(workcontext);
oldContext = MemoryContextSwitchTo(workcontext);
@ -570,9 +571,9 @@ finalize_aggregate(AggState *aggstate,
static void
build_hash_table(AggState *aggstate)
{
Agg *node = (Agg *) aggstate->ss.ps.plan;
MemoryContext tmpmem = aggstate->tmpcontext->ecxt_per_tuple_memory;
Size entrysize;
Agg *node = (Agg *) aggstate->ss.ps.plan;
MemoryContext tmpmem = aggstate->tmpcontext->ecxt_per_tuple_memory;
Size entrysize;

Assert(node->aggstrategy == AGG_HASHED);
Assert(node->numGroups > 0);
@ -622,9 +623,9 @@ lookup_hash_entry(AggState *aggstate, TupleTableSlot *slot)
* the appropriate attribute for each aggregate function use (Aggref
* node) appearing in the targetlist or qual of the node. The number
* of tuples to aggregate over depends on whether grouped or plain
* aggregation is selected. In grouped aggregation, we produce a result
* aggregation is selected. In grouped aggregation, we produce a result
* row for each group; in plain aggregation there's a single result row
* for the whole query. In either case, the value of each aggregate is
* for the whole query. In either case, the value of each aggregate is
* stored in the expression context to be used when ExecProject evaluates
* the result tuple.
*/
@ -641,9 +642,7 @@ ExecAgg(AggState *node)
return agg_retrieve_hash_table(node);
}
else
{
return agg_retrieve_direct(node);
}
}

/*
@ -736,7 +735,7 @@ agg_retrieve_direct(AggState *aggstate)
firstSlot,
InvalidBuffer,
true);
aggstate->grp_firstTuple = NULL; /* don't keep two pointers */
aggstate->grp_firstTuple = NULL; /* don't keep two pointers */

/* set up for first advance_aggregates call */
tmpcontext->ecxt_scantuple = firstSlot;
@ -773,7 +772,7 @@ agg_retrieve_direct(AggState *aggstate)
firstSlot->ttc_tupleDescriptor,
node->numCols, node->grpColIdx,
aggstate->eqfunctions,
tmpcontext->ecxt_per_tuple_memory))
tmpcontext->ecxt_per_tuple_memory))
{
/*
* Save the first input tuple of the next group.
@ -806,15 +805,15 @@ agg_retrieve_direct(AggState *aggstate)
* anything), create a dummy all-nulls input tuple for use by
* ExecProject. 99.44% of the time this is a waste of cycles,
* because ordinarily the projected output tuple's targetlist
* cannot contain any direct (non-aggregated) references to
* input columns, so the dummy tuple will not be referenced.
* However there are special cases where this isn't so --- in
* particular an UPDATE involving an aggregate will have a
* targetlist reference to ctid. We need to return a null for
* ctid in that situation, not coredump.
* cannot contain any direct (non-aggregated) references to input
* columns, so the dummy tuple will not be referenced. However
* there are special cases where this isn't so --- in particular
* an UPDATE involving an aggregate will have a targetlist
* reference to ctid. We need to return a null for ctid in that
* situation, not coredump.
*
* The values returned for the aggregates will be the initial
* values of the transition functions.
* The values returned for the aggregates will be the initial values
* of the transition functions.
*/
if (TupIsNull(firstSlot))
{
@ -872,7 +871,7 @@ agg_fill_hash_table(AggState *aggstate)
{
PlanState *outerPlan;
ExprContext *tmpcontext;
AggHashEntry entry;
AggHashEntry entry;
TupleTableSlot *outerslot;

/*
@ -883,8 +882,8 @@ agg_fill_hash_table(AggState *aggstate)
tmpcontext = aggstate->tmpcontext;

/*
* Process each outer-plan tuple, and then fetch the next one,
* until we exhaust the outer plan.
* Process each outer-plan tuple, and then fetch the next one, until
* we exhaust the outer plan.
*/
for (;;)
{
@ -921,8 +920,8 @@ agg_retrieve_hash_table(AggState *aggstate)
bool *aggnulls;
AggStatePerAgg peragg;
AggStatePerGroup pergroup;
TupleHashTable hashtable;
AggHashEntry entry;
TupleHashTable hashtable;
AggHashEntry entry;
TupleTableSlot *firstSlot;
TupleTableSlot *resultSlot;
int aggno;
@ -1045,20 +1044,20 @@ ExecInitAgg(Agg *node, EState *estate)
aggstate->hashtable = NULL;

/*
* Create expression contexts. We need two, one for per-input-tuple
* processing and one for per-output-tuple processing. We cheat a little
* by using ExecAssignExprContext() to build both.
* Create expression contexts. We need two, one for per-input-tuple
* processing and one for per-output-tuple processing. We cheat a
* little by using ExecAssignExprContext() to build both.
*/
ExecAssignExprContext(estate, &aggstate->ss.ps);
aggstate->tmpcontext = aggstate->ss.ps.ps_ExprContext;
ExecAssignExprContext(estate, &aggstate->ss.ps);

/*
* We also need a long-lived memory context for holding hashtable
* data structures and transition values. NOTE: the details of what
* is stored in aggcontext and what is stored in the regular per-query
* memory context are driven by a simple decision: we want to reset the
* aggcontext in ExecReScanAgg to recover no-longer-wanted space.
* We also need a long-lived memory context for holding hashtable data
* structures and transition values. NOTE: the details of what is
* stored in aggcontext and what is stored in the regular per-query
* memory context are driven by a simple decision: we want to reset
* the aggcontext in ExecReScanAgg to recover no-longer-wanted space.
*/
aggstate->aggcontext =
AllocSetContextCreate(CurrentMemoryContext,
@ -1079,10 +1078,10 @@ ExecInitAgg(Agg *node, EState *estate)
* initialize child expressions
*
* Note: ExecInitExpr finds Aggrefs for us, and also checks that no aggs
* contain other agg calls in their arguments. This would make no sense
* under SQL semantics anyway (and it's forbidden by the spec). Because
* that is true, we don't need to worry about evaluating the aggs in any
* particular order.
* contain other agg calls in their arguments. This would make no
* sense under SQL semantics anyway (and it's forbidden by the spec).
* Because that is true, we don't need to worry about evaluating the
* aggs in any particular order.
*/
aggstate->ss.ps.targetlist = (List *)
ExecInitExpr((Expr *) node->plan.targetlist,
@ -1116,19 +1115,20 @@ ExecInitAgg(Agg *node, EState *estate)
if (numaggs <= 0)
{
/*
* This is not an error condition: we might be using the Agg node just
* to do hash-based grouping. Even in the regular case,
* constant-expression simplification could optimize away all of the
* Aggrefs in the targetlist and qual. So keep going, but force local
* copy of numaggs positive so that palloc()s below don't choke.
* This is not an error condition: we might be using the Agg node
* just to do hash-based grouping. Even in the regular case,
* constant-expression simplification could optimize away all of
* the Aggrefs in the targetlist and qual. So keep going, but
* force local copy of numaggs positive so that palloc()s below
* don't choke.
*/
numaggs = 1;
}

/*
* If we are grouping, precompute fmgr lookup data for inner loop.
* We need both equality and hashing functions to do it by hashing,
* but only equality if not hashing.
* If we are grouping, precompute fmgr lookup data for inner loop. We
* need both equality and hashing functions to do it by hashing, but
* only equality if not hashing.
*/
if (node->numCols > 0)
{
@ -1146,8 +1146,8 @@ ExecInitAgg(Agg *node, EState *estate)
}

/*
* Set up aggregate-result storage in the output expr context, and also
* allocate my private per-agg working storage
* Set up aggregate-result storage in the output expr context, and
* also allocate my private per-agg working storage
*/
econtext = aggstate->ss.ps.ps_ExprContext;
econtext->ecxt_aggvalues = (Datum *) palloc0(sizeof(Datum) * numaggs);
@ -1174,8 +1174,8 @@ ExecInitAgg(Agg *node, EState *estate)
* unchanging fields of the per-agg data. We also detect duplicate
* aggregates (for example, "SELECT sum(x) ... HAVING sum(x) > 0").
* When duplicates are detected, we only make an AggStatePerAgg struct
* for the first one. The clones are simply pointed at the same result
* entry by giving them duplicate aggno values.
* for the first one. The clones are simply pointed at the same
* result entry by giving them duplicate aggno values.
*/
aggno = -1;
foreach(alist, aggstate->aggs)
@ -1425,9 +1425,9 @@ ExecReScanAgg(AggState *node, ExprContext *exprCtxt)
if (((Agg *) node->ss.ps.plan)->aggstrategy == AGG_HASHED)
{
/*
* In the hashed case, if we haven't yet built the hash table
* then we can just return; nothing done yet, so nothing to undo.
* If subnode's chgParam is not NULL then it will be re-scanned by
* In the hashed case, if we haven't yet built the hash table then
* we can just return; nothing done yet, so nothing to undo. If
* subnode's chgParam is not NULL then it will be re-scanned by
* ExecProcNode, else no reason to re-scan it at all.
*/
if (!node->table_filled)

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.52 2003/02/09 00:30:39 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.53 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -355,7 +355,7 @@ ExecReScanAppend(AppendState *node, ExprContext *exprCtxt)

for (i = node->as_firstplan; i <= node->as_lastplan; i++)
{
PlanState *subnode = node->appendplans[i];
PlanState *subnode = node->appendplans[i];

/*
* ExecReScan doesn't know about my subplans, so I have to do

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeHash.c,v 1.77 2003/07/21 17:05:09 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeHash.c,v 1.78 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -244,7 +244,7 @@ ExecHashTableCreate(Hash *node, List *hashOperators)
i = 0;
foreach(ho, hashOperators)
{
Oid hashfn;
Oid hashfn;

hashfn = get_op_hash_function(lfirsto(ho));
if (!OidIsValid(hashfn))

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.53 2003/07/21 17:05:09 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.54 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -22,8 +22,8 @@
#include "utils/memutils.h"


static TupleTableSlot *ExecHashJoinOuterGetTuple(PlanState *node,
HashJoinState *hjstate);
static TupleTableSlot *ExecHashJoinOuterGetTuple(PlanState * node,
HashJoinState *hjstate);
static TupleTableSlot *ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
BufFile *file,
TupleTableSlot *tupleSlot);
@ -94,10 +94,10 @@ ExecHashJoin(HashJoinState *node)

/*
* If we're doing an IN join, we want to return at most one row per
* outer tuple; so we can stop scanning the inner scan if we matched on
* the previous try.
* outer tuple; so we can stop scanning the inner scan if we matched
* on the previous try.
*/
if (node->js.jointype == JOIN_IN &&
if (node->js.jointype == JOIN_IN &&
node->hj_MatchedOuter)
node->hj_NeedNewOuter = true;

@ -244,7 +244,10 @@ ExecHashJoin(HashJoinState *node)
}
}

/* If we didn't return a tuple, may need to set NeedNewOuter */
/*
* If we didn't return a tuple, may need to set
* NeedNewOuter
*/
if (node->js.jointype == JOIN_IN)
{
node->hj_NeedNewOuter = true;
@ -365,7 +368,7 @@ ExecInitHashJoin(HashJoin *node, EState *estate)
case JOIN_LEFT:
hjstate->hj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
ExecGetResultType(innerPlanState(hjstate)));
ExecGetResultType(innerPlanState(hjstate)));
break;
default:
elog(ERROR, "unrecognized join type: %d",
@ -407,10 +410,10 @@ ExecInitHashJoin(HashJoin *node, EState *estate)
hjstate->hj_CurTuple = (HashJoinTuple) NULL;

/*
* The planner already made a list of the inner hashkeys for us,
* but we also need a list of the outer hashkeys, as well as a list
* of the hash operator OIDs. Both lists of exprs must then be prepared
* for execution.
* The planner already made a list of the inner hashkeys for us, but
* we also need a list of the outer hashkeys, as well as a list of the
* hash operator OIDs. Both lists of exprs must then be prepared for
* execution.
*/
hjstate->hj_InnerHashKeys = (List *)
ExecInitExpr((Expr *) hashNode->hashkeys,
@ -496,7 +499,7 @@ ExecEndHashJoin(HashJoinState *node)
*/

static TupleTableSlot *
ExecHashJoinOuterGetTuple(PlanState *node, HashJoinState *hjstate)
ExecHashJoinOuterGetTuple(PlanState * node, HashJoinState *hjstate)
{
HashJoinTable hashtable = hjstate->hj_HashTable;
int curbatch = hashtable->curbatch;
@ -701,11 +704,11 @@ ExecReScanHashJoin(HashJoinState *node, ExprContext *exprCtxt)
Assert(node->hj_HashTable != NULL);

/*
* In a multi-batch join, we currently have to do rescans the hard way,
* primarily because batch temp files may have already been released.
* But if it's a single-batch join, and there is no parameter change
* for the inner subnode, then we can just re-use the existing hash
* table without rebuilding it.
* In a multi-batch join, we currently have to do rescans the hard
* way, primarily because batch temp files may have already been
* released. But if it's a single-batch join, and there is no
* parameter change for the inner subnode, then we can just re-use the
* existing hash table without rebuilding it.
*/
if (node->hj_HashTable->nbatch == 0 &&
((PlanState *) node)->righttree->chgParam == NULL)
@ -718,6 +721,7 @@ ExecReScanHashJoin(HashJoinState *node, ExprContext *exprCtxt)
node->hj_hashdone = false;
ExecHashTableDestroy(node->hj_HashTable);
node->hj_HashTable = NULL;

/*
* if chgParam of subnode is not null then plan will be re-scanned
* by first ExecProcNode.
@ -736,8 +740,8 @@ ExecReScanHashJoin(HashJoinState *node, ExprContext *exprCtxt)
node->hj_MatchedOuter = false;

/*
* if chgParam of subnode is not null then plan will be re-scanned
* by first ExecProcNode.
* if chgParam of subnode is not null then plan will be re-scanned by
* first ExecProcNode.
*/
if (((PlanState *) node)->lefttree->chgParam == NULL)
ExecReScan(((PlanState *) node)->lefttree, exprCtxt);

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.80 2003/07/21 17:05:09 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.81 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -290,7 +290,8 @@ ExecIndexReScan(IndexScanState *node, ExprContext *exprCtxt)
int j;

estate = node->ss.ps.state;
econtext = node->iss_RuntimeContext; /* context for runtime keys */
econtext = node->iss_RuntimeContext; /* context for runtime
* keys */
numIndices = node->iss_NumIndices;
scanDescs = node->iss_ScanDescs;
scanKeys = node->iss_ScanKeys;
@ -882,7 +883,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate)
reloid)));

indexstate->ss.ss_currentRelation = currentRelation;
indexstate->ss.ss_currentScanDesc = NULL; /* no heap scan here */
indexstate->ss.ss_currentScanDesc = NULL; /* no heap scan here */

/*
* get the scan type from the relation descriptor.

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeLimit.c,v 1.15 2003/07/21 17:05:09 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeLimit.c,v 1.16 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -55,17 +55,21 @@ ExecLimit(LimitState *node)
switch (node->lstate)
{
case LIMIT_INITIAL:

/*
* If backwards scan, just return NULL without changing state.
*/
if (!ScanDirectionIsForward(direction))
return NULL;

/*
* First call for this scan, so compute limit/offset. (We can't do
* this any earlier, because parameters from upper nodes may not
* be set until now.) This also sets position = 0.
* First call for this scan, so compute limit/offset. (We
* can't do this any earlier, because parameters from upper
* nodes may not be set until now.) This also sets position =
* 0.
*/
recompute_limits(node);

/*
* Check for empty window; if so, treat like empty subplan.
*/
@ -74,6 +78,7 @@ ExecLimit(LimitState *node)
node->lstate = LIMIT_EMPTY;
return NULL;
}

/*
* Fetch rows from subplan until we reach position > offset.
*/
@ -83,8 +88,8 @@ ExecLimit(LimitState *node)
if (TupIsNull(slot))
{
/*
* The subplan returns too few tuples for us to produce
* any output at all.
* The subplan returns too few tuples for us to
* produce any output at all.
*/
node->lstate = LIMIT_EMPTY;
return NULL;
@ -93,6 +98,7 @@ ExecLimit(LimitState *node)
if (++node->position > node->offset)
break;
}

/*
* Okay, we have the first tuple of the window.
*/
@ -100,9 +106,10 @@ ExecLimit(LimitState *node)
break;

case LIMIT_EMPTY:

/*
* The subplan is known to return no tuples (or not more than
* OFFSET tuples, in general). So we return no tuples.
* OFFSET tuples, in general). So we return no tuples.
*/
return NULL;

@ -113,7 +120,8 @@ ExecLimit(LimitState *node)
* Forwards scan, so check for stepping off end of window.
* If we are at the end of the window, return NULL without
* advancing the subplan or the position variable; but
* change the state machine state to record having done so.
* change the state machine state to record having done
* so.
*/
if (!node->noCount &&
node->position >= node->offset + node->count)
@ -121,6 +129,7 @@ ExecLimit(LimitState *node)
node->lstate = LIMIT_WINDOWEND;
return NULL;
}

/*
* Get next tuple from subplan, if any.
*/
@ -136,14 +145,16 @@ ExecLimit(LimitState *node)
else
{
/*
* Backwards scan, so check for stepping off start of window.
* As above, change only state-machine status if so.
* Backwards scan, so check for stepping off start of
* window. As above, change only state-machine status if
* so.
*/
if (node->position <= node->offset + 1)
{
node->lstate = LIMIT_WINDOWSTART;
return NULL;
}

/*
* Get previous tuple from subplan; there should be one!
*/
@ -158,9 +169,11 @@ ExecLimit(LimitState *node)
case LIMIT_SUBPLANEOF:
if (ScanDirectionIsForward(direction))
return NULL;

/*
* Backing up from subplan EOF, so re-fetch previous tuple;
* there should be one! Note previous tuple must be in window.
* there should be one! Note previous tuple must be in
* window.
*/
slot = ExecProcNode(outerPlan);
if (TupIsNull(slot))
@ -173,9 +186,10 @@ ExecLimit(LimitState *node)
case LIMIT_WINDOWEND:
if (ScanDirectionIsForward(direction))
return NULL;

/*
* Backing up from window end: simply re-return the last
* tuple fetched from the subplan.
* Backing up from window end: simply re-return the last tuple
* fetched from the subplan.
*/
slot = node->subSlot;
node->lstate = LIMIT_INWINDOW;
@ -185,6 +199,7 @@ ExecLimit(LimitState *node)
case LIMIT_WINDOWSTART:
if (!ScanDirectionIsForward(direction))
return NULL;

/*
* Advancing after having backed off window start: simply
* re-return the last tuple fetched from the subplan.

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.42 2003/03/27 16:51:27 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.43 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -79,15 +79,15 @@ ExecMaterial(MaterialState *node)
{
/*
* When reversing direction at tuplestore EOF, the first
* getheaptuple call will fetch the last-added tuple; but
* we want to return the one before that, if possible.
* So do an extra fetch.
* getheaptuple call will fetch the last-added tuple; but we
* want to return the one before that, if possible. So do an
* extra fetch.
*/
heapTuple = tuplestore_getheaptuple(tuplestorestate,
forward,
&should_free);
if (heapTuple == NULL)
return NULL; /* the tuplestore must be empty */
return NULL; /* the tuplestore must be empty */
if (should_free)
heap_freetuple(heapTuple);
}
@ -129,10 +129,11 @@ ExecMaterial(MaterialState *node)
}
heapTuple = outerslot->val;
should_free = false;

/*
* Append returned tuple to tuplestore, too. NOTE: because the
* tuplestore is certainly in EOF state, its read position will move
* forward over the added tuple. This is what we want.
* tuplestore is certainly in EOF state, its read position will
* move forward over the added tuple. This is what we want.
*/
tuplestore_puttuple(tuplestorestate, (void *) heapTuple);
}
@ -293,8 +294,8 @@ ExecMaterialReScan(MaterialState *node, ExprContext *exprCtxt)
* If subnode is to be rescanned then we forget previous stored
* results; we have to re-read the subplan and re-store.
*
* Otherwise we can just rewind and rescan the stored output.
* The state of the subnode does not change.
* Otherwise we can just rewind and rescan the stored output. The state
* of the subnode does not change.
*/
if (((PlanState *) node)->lefttree->chgParam != NULL)
{
@ -303,7 +304,5 @@ ExecMaterialReScan(MaterialState *node, ExprContext *exprCtxt)
node->eof_underlying = false;
}
else
{
tuplestore_rescan((Tuplestorestate *) node->tuplestorestate);
}
}

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.58 2003/07/21 17:05:10 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.59 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -102,7 +102,7 @@ static bool MergeCompare(List *eqQual, List *compareQual, ExprContext *econtext)
*/
static void
MJFormSkipQuals(List *qualList, List **ltQuals, List **gtQuals,
PlanState *parent)
PlanState * parent)
{
List *ltexprs,
*gtexprs,
@ -358,9 +358,9 @@ ExecMergeJoin(MergeJoinState *node)
List *otherqual;
bool qualResult;
bool compareResult;
PlanState *innerPlan;
PlanState *innerPlan;
TupleTableSlot *innerTupleSlot;
PlanState *outerPlan;
PlanState *outerPlan;
TupleTableSlot *outerTupleSlot;
ExprContext *econtext;
bool doFillOuter;
@ -644,7 +644,7 @@ ExecMergeJoin(MergeJoinState *node)
* tuple, and return it if it passes the non-join
* quals.
*/
node->mj_MatchedInner = true; /* do it only once */
node->mj_MatchedInner = true; /* do it only once */

ResetExprContext(econtext);

@ -720,7 +720,7 @@ ExecMergeJoin(MergeJoinState *node)
* tuple, and return it if it passes the non-join
* quals.
*/
node->mj_MatchedOuter = true; /* do it only once */
node->mj_MatchedOuter = true; /* do it only once */

ResetExprContext(econtext);

@ -1004,7 +1004,7 @@ ExecMergeJoin(MergeJoinState *node)
* tuple, and return it if it passes the non-join
* quals.
*/
node->mj_MatchedOuter = true; /* do it only once */
node->mj_MatchedOuter = true; /* do it only once */

ResetExprContext(econtext);

@ -1181,7 +1181,7 @@ ExecMergeJoin(MergeJoinState *node)
* tuple, and return it if it passes the non-join
* quals.
*/
node->mj_MatchedInner = true; /* do it only once */
node->mj_MatchedInner = true; /* do it only once */

ResetExprContext(econtext);

@ -1266,7 +1266,7 @@ ExecMergeJoin(MergeJoinState *node)
* tuple, and return it if it passes the non-join
* quals.
*/
node->mj_MatchedInner = true; /* do it only once */
node->mj_MatchedInner = true; /* do it only once */

ResetExprContext(econtext);

@ -1333,7 +1333,7 @@ ExecMergeJoin(MergeJoinState *node)
* tuple, and return it if it passes the non-join
* quals.
*/
node->mj_MatchedOuter = true; /* do it only once */
node->mj_MatchedOuter = true; /* do it only once */

ResetExprContext(econtext);

@ -1462,12 +1462,12 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate)
case JOIN_LEFT:
mergestate->mj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
ExecGetResultType(innerPlanState(mergestate)));
ExecGetResultType(innerPlanState(mergestate)));
break;
case JOIN_RIGHT:
mergestate->mj_NullOuterTupleSlot =
ExecInitNullTupleSlot(estate,
ExecGetResultType(outerPlanState(mergestate)));
ExecGetResultType(outerPlanState(mergestate)));

/*
* Can't handle right or full join with non-nil extra
@ -1481,10 +1481,10 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate)
case JOIN_FULL:
mergestate->mj_NullOuterTupleSlot =
ExecInitNullTupleSlot(estate,
ExecGetResultType(outerPlanState(mergestate)));
ExecGetResultType(outerPlanState(mergestate)));
mergestate->mj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
ExecGetResultType(innerPlanState(mergestate)));
ExecGetResultType(innerPlanState(mergestate)));

/*
* Can't handle right or full join with non-nil extra

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeNestloop.c,v 1.33 2003/07/21 17:05:10 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeNestloop.c,v 1.34 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -103,8 +103,8 @@ ExecNestLoop(NestLoopState *node)

/*
* If we're doing an IN join, we want to return at most one row per
* outer tuple; so we can stop scanning the inner scan if we matched on
* the previous try.
* outer tuple; so we can stop scanning the inner scan if we matched
* on the previous try.
*/
if (node->js.jointype == JOIN_IN &&
node->nl_MatchedOuter)
@ -330,7 +330,7 @@ ExecInitNestLoop(NestLoop *node, EState *estate)
case JOIN_LEFT:
nlstate->nl_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
ExecGetResultType(innerPlanState(nlstate)));
ExecGetResultType(innerPlanState(nlstate)));
break;
default:
elog(ERROR, "unrecognized join type: %d",
@ -404,7 +404,7 @@ ExecEndNestLoop(NestLoopState *node)
void
ExecReScanNestLoop(NestLoopState *node, ExprContext *exprCtxt)
{
PlanState *outerPlan = outerPlanState(node);
PlanState *outerPlan = outerPlanState(node);

/*
* If outerPlan->chgParam is not null then plan will be automatically

@ -34,7 +34,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeResult.c,v 1.24 2002/12/15 16:17:46 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeResult.c,v 1.25 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -64,7 +64,7 @@ ExecResult(ResultState *node)
{
TupleTableSlot *outerTupleSlot;
TupleTableSlot *resultSlot;
PlanState *outerPlan;
PlanState *outerPlan;
ExprContext *econtext;
ExprDoneCond isDone;

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.43 2003/02/03 15:07:07 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.44 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -29,8 +29,8 @@
#include "executor/nodeSeqscan.h"
#include "parser/parsetree.h"

static void InitScanRelation(SeqScanState *node, EState *estate);
static TupleTableSlot *SeqNext(SeqScanState *node);
static void InitScanRelation(SeqScanState * node, EState *estate);
static TupleTableSlot *SeqNext(SeqScanState * node);

/* ----------------------------------------------------------------
* Scan Support
@ -43,7 +43,7 @@ static TupleTableSlot *SeqNext(SeqScanState *node);
* ----------------------------------------------------------------
*/
static TupleTableSlot *
SeqNext(SeqScanState *node)
SeqNext(SeqScanState * node)
{
HeapTuple tuple;
HeapScanDesc scandesc;
@ -123,7 +123,7 @@ SeqNext(SeqScanState *node)
*/

TupleTableSlot *
ExecSeqScan(SeqScanState *node)
ExecSeqScan(SeqScanState * node)
{
/*
* use SeqNext as access method
@ -139,7 +139,7 @@ ExecSeqScan(SeqScanState *node)
* ----------------------------------------------------------------
*/
static void
InitScanRelation(SeqScanState *node, EState *estate)
InitScanRelation(SeqScanState * node, EState *estate)
{
Index relid;
List *rangeTable;
@ -252,7 +252,7 @@ ExecCountSlotsSeqScan(SeqScan *node)
* ----------------------------------------------------------------
*/
void
ExecEndSeqScan(SeqScanState *node)
ExecEndSeqScan(SeqScanState * node)
{
Relation relation;
HeapScanDesc scanDesc;
@ -302,7 +302,7 @@ ExecEndSeqScan(SeqScanState *node)
* ----------------------------------------------------------------
*/
void
ExecSeqReScan(SeqScanState *node, ExprContext *exprCtxt)
ExecSeqReScan(SeqScanState * node, ExprContext *exprCtxt)
{
EState *estate;
Index scanrelid;
@ -332,7 +332,7 @@ ExecSeqReScan(SeqScanState *node, ExprContext *exprCtxt)
* ----------------------------------------------------------------
*/
void
ExecSeqMarkPos(SeqScanState *node)
ExecSeqMarkPos(SeqScanState * node)
{
HeapScanDesc scan;

@ -347,7 +347,7 @@ ExecSeqMarkPos(SeqScanState *node)
* ----------------------------------------------------------------
*/
void
ExecSeqRestrPos(SeqScanState *node)
ExecSeqRestrPos(SeqScanState * node)
{
HeapScanDesc scan;

@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSubplan.c,v 1.51 2003/07/21 17:05:10 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSubplan.c,v 1.52 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -29,13 +29,13 @@
#include "utils/lsyscache.h"


static Datum ExecHashSubPlan(SubPlanState *node,
ExprContext *econtext,
bool *isNull);
static Datum ExecScanSubPlan(SubPlanState *node,
ExprContext *econtext,
bool *isNull);
static void buildSubPlanHash(SubPlanState *node);
static Datum ExecHashSubPlan(SubPlanState * node,
ExprContext *econtext,
bool *isNull);
static Datum ExecScanSubPlan(SubPlanState * node,
ExprContext *econtext,
bool *isNull);
static void buildSubPlanHash(SubPlanState * node);
static bool findPartialMatch(TupleHashTable hashtable, TupleTableSlot *slot);
static bool tupleAllNulls(HeapTuple tuple);

@ -45,11 +45,11 @@ static bool tupleAllNulls(HeapTuple tuple);
* ----------------------------------------------------------------
*/
Datum
ExecSubPlan(SubPlanState *node,
ExecSubPlan(SubPlanState * node,
ExprContext *econtext,
bool *isNull)
{
SubPlan *subplan = (SubPlan *) node->xprstate.expr;
SubPlan *subplan = (SubPlan *) node->xprstate.expr;

if (subplan->setParam != NIL)
elog(ERROR, "cannot set parent params from subquery");
@ -64,11 +64,11 @@ ExecSubPlan(SubPlanState *node,
* ExecHashSubPlan: store subselect result in an in-memory hash table
*/
static Datum
ExecHashSubPlan(SubPlanState *node,
ExecHashSubPlan(SubPlanState * node,
ExprContext *econtext,
bool *isNull)
{
SubPlan *subplan = (SubPlan *) node->xprstate.expr;
SubPlan *subplan = (SubPlan *) node->xprstate.expr;
PlanState *planstate = node->planstate;
ExprContext *innerecontext = node->innerecontext;
TupleTableSlot *slot;
@ -79,8 +79,8 @@ ExecHashSubPlan(SubPlanState *node,
elog(ERROR, "hashed subplan with direct correlation not supported");

/*
* If first time through or we need to rescan the subplan, build
* the hash table.
* If first time through or we need to rescan the subplan, build the
* hash table.
*/
if (node->hashtable == NULL || planstate->chgParam != NULL)
buildSubPlanHash(node);
@ -94,19 +94,19 @@ ExecHashSubPlan(SubPlanState *node,
return BoolGetDatum(false);

/*
* Evaluate lefthand expressions and form a projection tuple.
* First we have to set the econtext to use (hack alert!).
* Evaluate lefthand expressions and form a projection tuple. First we
* have to set the econtext to use (hack alert!).
*/
node->projLeft->pi_exprContext = econtext;
slot = ExecProject(node->projLeft, NULL);
tup = slot->val;

/*
* Note: because we are typically called in a per-tuple context,
* we have to explicitly clear the projected tuple before returning.
* Otherwise, we'll have a double-free situation: the per-tuple context
* will probably be reset before we're called again, and then the tuple
* slot will think it still needs to free the tuple.
* Note: because we are typically called in a per-tuple context, we
* have to explicitly clear the projected tuple before returning.
* Otherwise, we'll have a double-free situation: the per-tuple
* context will probably be reset before we're called again, and then
* the tuple slot will think it still needs to free the tuple.
*/

/*
@ -116,20 +116,20 @@ ExecHashSubPlan(SubPlanState *node,
ResetExprContext(innerecontext);

/*
* If the LHS is all non-null, probe for an exact match in the
* main hash table. If we find one, the result is TRUE.
* Otherwise, scan the partly-null table to see if there are any
* rows that aren't provably unequal to the LHS; if so, the result
* is UNKNOWN. (We skip that part if we don't care about UNKNOWN.)
* Otherwise, the result is FALSE.
* If the LHS is all non-null, probe for an exact match in the main
* hash table. If we find one, the result is TRUE. Otherwise, scan
* the partly-null table to see if there are any rows that aren't
* provably unequal to the LHS; if so, the result is UNKNOWN. (We
* skip that part if we don't care about UNKNOWN.) Otherwise, the
* result is FALSE.
*
* Note: the reason we can avoid a full scan of the main hash table
* is that the combining operators are assumed never to yield NULL
* when both inputs are non-null. If they were to do so, we might
* need to produce UNKNOWN instead of FALSE because of an UNKNOWN
* result in comparing the LHS to some main-table entry --- which
* is a comparison we will not even make, unless there's a chance
* match of hash keys.
* Note: the reason we can avoid a full scan of the main hash table is
* that the combining operators are assumed never to yield NULL when
* both inputs are non-null. If they were to do so, we might need to
* produce UNKNOWN instead of FALSE because of an UNKNOWN result in
* comparing the LHS to some main-table entry --- which is a
* comparison we will not even make, unless there's a chance match of
* hash keys.
*/
if (HeapTupleNoNulls(tup))
{
@ -151,14 +151,14 @@ ExecHashSubPlan(SubPlanState *node,
}

/*
* When the LHS is partly or wholly NULL, we can never return TRUE.
* If we don't care about UNKNOWN, just return FALSE. Otherwise,
* if the LHS is wholly NULL, immediately return UNKNOWN. (Since the
* combining operators are strict, the result could only be FALSE if the
* sub-select were empty, but we already handled that case.) Otherwise,
* we must scan both the main and partly-null tables to see if there are
* any rows that aren't provably unequal to the LHS; if so, the result is
* UNKNOWN. Otherwise, the result is FALSE.
* When the LHS is partly or wholly NULL, we can never return TRUE. If
* we don't care about UNKNOWN, just return FALSE. Otherwise, if the
* LHS is wholly NULL, immediately return UNKNOWN. (Since the
* combining operators are strict, the result could only be FALSE if
* the sub-select were empty, but we already handled that case.)
* Otherwise, we must scan both the main and partly-null tables to see
* if there are any rows that aren't provably unequal to the LHS; if
* so, the result is UNKNOWN. Otherwise, the result is FALSE.
*/
if (node->hashnulls == NULL)
{
@ -194,11 +194,11 @@ ExecHashSubPlan(SubPlanState *node,
* ExecScanSubPlan: default case where we have to rescan subplan each time
*/
static Datum
ExecScanSubPlan(SubPlanState *node,
ExecScanSubPlan(SubPlanState * node,
ExprContext *econtext,
bool *isNull)
{
SubPlan *subplan = (SubPlan *) node->xprstate.expr;
SubPlan *subplan = (SubPlan *) node->xprstate.expr;
PlanState *planstate = node->planstate;
SubLinkType subLinkType = subplan->subLinkType;
bool useOr = subplan->useOr;
@ -218,14 +218,14 @@ ExecScanSubPlan(SubPlanState *node,
oldcontext = MemoryContextSwitchTo(node->sub_estate->es_query_cxt);

/*
* Set Params of this plan from parent plan correlation values.
* (Any calculation we have to do is done in the parent econtext,
* since the Param values don't need to have per-query lifetime.)
* Set Params of this plan from parent plan correlation values. (Any
* calculation we have to do is done in the parent econtext, since the
* Param values don't need to have per-query lifetime.)
*/
pvar = node->args;
foreach(lst, subplan->parParam)
{
int paramid = lfirsti(lst);
int paramid = lfirsti(lst);
ParamExecData *prm = &(econtext->ecxt_param_exec_vals[paramid]);

Assert(pvar != NIL);
@ -241,23 +241,24 @@ ExecScanSubPlan(SubPlanState *node,
ExecReScan(planstate, NULL);

/*
* For all sublink types except EXPR_SUBLINK and ARRAY_SUBLINK, the result
* is boolean as are the results of the combining operators. We combine
* results within a tuple (if there are multiple columns) using OR
* semantics if "useOr" is true, AND semantics if not. We then combine
* results across tuples (if the subplan produces more than one) using OR
* semantics for ANY_SUBLINK or AND semantics for ALL_SUBLINK.
* (MULTIEXPR_SUBLINK doesn't allow multiple tuples from the subplan.)
* NULL results from the combining operators are handled according to
* the usual SQL semantics for OR and AND. The result for no input
* tuples is FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK, NULL for
* MULTIEXPR_SUBLINK.
* For all sublink types except EXPR_SUBLINK and ARRAY_SUBLINK, the
* result is boolean as are the results of the combining operators. We
* combine results within a tuple (if there are multiple columns)
* using OR semantics if "useOr" is true, AND semantics if not. We
* then combine results across tuples (if the subplan produces more
* than one) using OR semantics for ANY_SUBLINK or AND semantics for
* ALL_SUBLINK. (MULTIEXPR_SUBLINK doesn't allow multiple tuples from
* the subplan.) NULL results from the combining operators are handled
* according to the usual SQL semantics for OR and AND. The result
* for no input tuples is FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK,
* NULL for MULTIEXPR_SUBLINK.
*
* For EXPR_SUBLINK we require the subplan to produce no more than one
* tuple, else an error is raised. For ARRAY_SUBLINK we allow the subplan
* to produce more than one tuple. In either case, if zero tuples are
* produced, we return NULL. Assuming we get a tuple, we just use its
* first column (there can be only one non-junk column in this case).
* tuple, else an error is raised. For ARRAY_SUBLINK we allow the
* subplan to produce more than one tuple. In either case, if zero
* tuples are produced, we return NULL. Assuming we get a tuple, we
* just use its first column (there can be only one non-junk column in
* this case).
*/
result = BoolGetDatum(subLinkType == ALL_SUBLINK);
*isNull = false;
@ -311,8 +312,8 @@ ExecScanSubPlan(SubPlanState *node,

if (subLinkType == ARRAY_SUBLINK)
{
Datum dvalue;
bool disnull;
Datum dvalue;
bool disnull;

found = true;
/* stash away current value */
@ -346,7 +347,8 @@ ExecScanSubPlan(SubPlanState *node,
bool expnull;

/*
* Load up the Param representing this column of the sub-select.
* Load up the Param representing this column of the
* sub-select.
*/
prmdata = &(econtext->ecxt_param_exec_vals[paramid]);
Assert(prmdata->execPlan == NULL);
@ -432,8 +434,8 @@ ExecScanSubPlan(SubPlanState *node,
{
/*
* deal with empty subplan result. result/isNull were previously
* initialized correctly for all sublink types except EXPR, ARRAY, and
* MULTIEXPR; for those, return NULL.
* initialized correctly for all sublink types except EXPR, ARRAY,
* and MULTIEXPR; for those, return NULL.
*/
if (subLinkType == EXPR_SUBLINK ||
subLinkType == ARRAY_SUBLINK ||
@ -459,9 +461,9 @@ ExecScanSubPlan(SubPlanState *node,
* buildSubPlanHash: load hash table by scanning subplan output.
*/
static void
buildSubPlanHash(SubPlanState *node)
buildSubPlanHash(SubPlanState * node)
{
SubPlan *subplan = (SubPlan *) node->xprstate.expr;
SubPlan *subplan = (SubPlan *) node->xprstate.expr;
PlanState *planstate = node->planstate;
int ncols = length(node->exprs);
ExprContext *innerecontext = node->innerecontext;
@ -474,19 +476,19 @@ buildSubPlanHash(SubPlanState *node)
Assert(!subplan->useOr);

/*
* If we already had any hash tables, destroy 'em; then create
* empty hash table(s).
* If we already had any hash tables, destroy 'em; then create empty
* hash table(s).
*
* If we need to distinguish accurately between FALSE and UNKNOWN
* (i.e., NULL) results of the IN operation, then we have to store
* subplan output rows that are partly or wholly NULL. We store such
* rows in a separate hash table that we expect will be much smaller
* than the main table. (We can use hashing to eliminate partly-null
* rows that are not distinct. We keep them separate to minimize the
* cost of the inevitable full-table searches; see findPartialMatch.)
* If we need to distinguish accurately between FALSE and UNKNOWN (i.e.,
* NULL) results of the IN operation, then we have to store subplan
* output rows that are partly or wholly NULL. We store such rows in
* a separate hash table that we expect will be much smaller than the
* main table. (We can use hashing to eliminate partly-null rows that
* are not distinct. We keep them separate to minimize the cost of
* the inevitable full-table searches; see findPartialMatch.)
*
* If it's not necessary to distinguish FALSE and UNKNOWN, then we
* don't need to store subplan output rows that contain NULL.
* If it's not necessary to distinguish FALSE and UNKNOWN, then we don't
* need to store subplan output rows that contain NULL.
*/
MemoryContextReset(node->tablecxt);
node->hashtable = NULL;
@ -529,7 +531,8 @@ buildSubPlanHash(SubPlanState *node)

/*
* We are probably in a short-lived expression-evaluation context.
* Switch to the child plan's per-query context for calling ExecProcNode.
* Switch to the child plan's per-query context for calling
* ExecProcNode.
*/
oldcontext = MemoryContextSwitchTo(node->sub_estate->es_query_cxt);

@ -539,8 +542,9 @@ buildSubPlanHash(SubPlanState *node)
ExecReScan(planstate, NULL);

/*
* Scan the subplan and load the hash table(s). Note that when there are
* duplicate rows coming out of the sub-select, only one copy is stored.
* Scan the subplan and load the hash table(s). Note that when there
* are duplicate rows coming out of the sub-select, only one copy is
* stored.
*/
for (slot = ExecProcNode(planstate);
!TupIsNull(slot);
@ -572,9 +576,9 @@ buildSubPlanHash(SubPlanState *node)

/*
* If result contains any nulls, store separately or not at all.
* (Since we know the projection tuple has no junk columns, we
* can just look at the overall hasnull info bit, instead of
* groveling through the columns.)
* (Since we know the projection tuple has no junk columns, we can
* just look at the overall hasnull info bit, instead of groveling
* through the columns.)
*/
if (HeapTupleNoNulls(tup))
{
@ -621,7 +625,7 @@ findPartialMatch(TupleHashTable hashtable, TupleTableSlot *slot)
HeapTuple tuple = slot->val;
TupleDesc tupdesc = slot->ttc_tupleDescriptor;
TupleHashIterator hashiter;
TupleHashEntry entry;
TupleHashEntry entry;

ResetTupleHashIterator(&hashiter);
while ((entry = ScanTupleHashTable(hashtable, &hashiter)) != NULL)
@ -643,8 +647,8 @@ findPartialMatch(TupleHashTable hashtable, TupleTableSlot *slot)
static bool
tupleAllNulls(HeapTuple tuple)
{
int ncols = tuple->t_data->t_natts;
int i;
int ncols = tuple->t_data->t_natts;
int i;

for (i = 1; i <= ncols; i++)
{
@ -659,15 +663,15 @@ tupleAllNulls(HeapTuple tuple)
* ----------------------------------------------------------------
*/
void
ExecInitSubPlan(SubPlanState *node, EState *estate)
ExecInitSubPlan(SubPlanState * node, EState *estate)
{
SubPlan *subplan = (SubPlan *) node->xprstate.expr;
SubPlan *subplan = (SubPlan *) node->xprstate.expr;
EState *sp_estate;
MemoryContext oldcontext;

/*
* Do access checking on the rangetable entries in the subquery.
* Here, we assume the subquery is a SELECT.
* Do access checking on the rangetable entries in the subquery. Here,
* we assume the subquery is a SELECT.
*/
ExecCheckRTPerms(subplan->rtable, CMD_SELECT);

@ -690,9 +694,9 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
* create an EState for the subplan
*
* The subquery needs its own EState because it has its own rangetable.
* It shares our Param ID space, however. XXX if rangetable access were
* done differently, the subquery could share our EState, which would
* eliminate some thrashing about in this module...
* It shares our Param ID space, however. XXX if rangetable access
* were done differently, the subquery could share our EState, which
* would eliminate some thrashing about in this module...
*/
sp_estate = CreateExecutorState();
node->sub_estate = sp_estate;
@ -721,9 +725,9 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
* to set params for parent plan then mark parameters as needing
* evaluation.
*
* Note that in the case of un-correlated subqueries we don't care
* about setting parent->chgParam here: indices take care about
* it, for others - it doesn't matter...
* Note that in the case of un-correlated subqueries we don't care about
* setting parent->chgParam here: indices take care about it, for
* others - it doesn't matter...
*/
if (subplan->setParam != NIL)
{
@ -731,7 +735,7 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)

foreach(lst, subplan->setParam)
{
int paramid = lfirsti(lst);
int paramid = lfirsti(lst);
ParamExecData *prm = &(estate->es_param_exec_vals[paramid]);

prm->execPlan = node;
@ -744,8 +748,8 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
*/
if (subplan->useHashTable)
{
int ncols,
i;
int ncols,
i;
TupleDesc tupDesc;
TupleTable tupTable;
TupleTableSlot *slot;
@ -768,15 +772,16 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
ncols = length(node->exprs);
node->keyColIdx = (AttrNumber *) palloc(ncols * sizeof(AttrNumber));
for (i = 0; i < ncols; i++)
node->keyColIdx[i] = i+1;
node->keyColIdx[i] = i + 1;

/*
* We use ExecProject to evaluate the lefthand and righthand
* expression lists and form tuples. (You might think that we
* could use the sub-select's output tuples directly, but that is
* not the case if we had to insert any run-time coercions of the
* sub-select's output datatypes; anyway this avoids storing any
* resjunk columns that might be in the sub-select's output.)
* Run through the combining expressions to build tlists for the
* resjunk columns that might be in the sub-select's output.) Run
* through the combining expressions to build tlists for the
* lefthand and righthand sides. We need both the ExprState list
* (for ExecProject) and the underlying parse Exprs (for
* ExecTypeFromTL).
@ -791,7 +796,7 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
i = 1;
foreach(lexpr, node->exprs)
{
FuncExprState *fstate = (FuncExprState *) lfirst(lexpr);
FuncExprState *fstate = (FuncExprState *) lfirst(lexpr);
OpExpr *opexpr = (OpExpr *) fstate->xprstate.expr;
ExprState *exstate;
Expr *expr;
@ -834,34 +839,34 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
rightptlist = lappend(rightptlist, tle);

/* Lookup the combining function */
fmgr_info(opexpr->opfuncid, &node->eqfunctions[i-1]);
node->eqfunctions[i-1].fn_expr = (Node *) opexpr;
fmgr_info(opexpr->opfuncid, &node->eqfunctions[i - 1]);
node->eqfunctions[i - 1].fn_expr = (Node *) opexpr;

/* Lookup the associated hash function */
hashfn = get_op_hash_function(opexpr->opno);
if (!OidIsValid(hashfn))
elog(ERROR, "could not find hash function for hash operator %u",
opexpr->opno);
fmgr_info(hashfn, &node->hashfunctions[i-1]);
fmgr_info(hashfn, &node->hashfunctions[i - 1]);

i++;
}

/*
* Create a tupletable to hold these tuples. (Note: we never bother
* to free the tupletable explicitly; that's okay because it will
* never store raw disk tuples that might have associated buffer
* pins. The only resource involved is memory, which will be
* cleaned up by freeing the query context.)
* Create a tupletable to hold these tuples. (Note: we never
* bother to free the tupletable explicitly; that's okay because
* it will never store raw disk tuples that might have associated
* buffer pins. The only resource involved is memory, which will
* be cleaned up by freeing the query context.)
*/
tupTable = ExecCreateTupleTable(2);

/*
* Construct tupdescs, slots and projection nodes for left and
* right sides. The lefthand expressions will be evaluated in
* the parent plan node's exprcontext, which we don't have access
* to here. Fortunately we can just pass NULL for now and fill it
* in later (hack alert!). The righthand expressions will be
* right sides. The lefthand expressions will be evaluated in the
* parent plan node's exprcontext, which we don't have access to
* here. Fortunately we can just pass NULL for now and fill it in
* later (hack alert!). The righthand expressions will be
* evaluated in our own innerecontext.
*/
tupDesc = ExecTypeFromTL(leftptlist, false);
@ -894,11 +899,11 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
* ----------------------------------------------------------------
*/
void
ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
ExecSetParamPlan(SubPlanState * node, ExprContext *econtext)
{
SubPlan *subplan = (SubPlan *) node->xprstate.expr;
SubPlan *subplan = (SubPlan *) node->xprstate.expr;
PlanState *planstate = node->planstate;
SubLinkType subLinkType = subplan->subLinkType;
SubLinkType subLinkType = subplan->subLinkType;
MemoryContext oldcontext;
TupleTableSlot *slot;
List *lst;
@ -928,7 +933,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
if (subLinkType == EXISTS_SUBLINK)
{
/* There can be only one param... */
int paramid = lfirsti(subplan->setParam);
int paramid = lfirsti(subplan->setParam);
ParamExecData *prm = &(econtext->ecxt_param_exec_vals[paramid]);

prm->execPlan = NULL;
@ -940,8 +945,8 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)

if (subLinkType == ARRAY_SUBLINK)
{
Datum dvalue;
bool disnull;
Datum dvalue;
bool disnull;

found = true;
/* stash away current value */
@ -963,8 +968,8 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
found = true;

/*
* We need to copy the subplan's tuple into our own context,
* in case any of the params are pass-by-ref type --- the pointers
* We need to copy the subplan's tuple into our own context, in
* case any of the params are pass-by-ref type --- the pointers
* stored in the param structs will point at this copied tuple!
* node->curTuple keeps track of the copied tuple for eventual
* freeing.
@ -981,7 +986,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
*/
foreach(lst, subplan->setParam)
{
int paramid = lfirsti(lst);
int paramid = lfirsti(lst);
ParamExecData *prm = &(econtext->ecxt_param_exec_vals[paramid]);

prm->execPlan = NULL;
@ -995,7 +1000,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
if (subLinkType == EXISTS_SUBLINK)
{
/* There can be only one param... */
int paramid = lfirsti(subplan->setParam);
int paramid = lfirsti(subplan->setParam);
ParamExecData *prm = &(econtext->ecxt_param_exec_vals[paramid]);

prm->execPlan = NULL;
@ -1006,7 +1011,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
{
foreach(lst, subplan->setParam)
{
int paramid = lfirsti(lst);
int paramid = lfirsti(lst);
ParamExecData *prm = &(econtext->ecxt_param_exec_vals[paramid]);

prm->execPlan = NULL;
@ -1018,7 +1023,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
else if (subLinkType == ARRAY_SUBLINK)
{
/* There can be only one param... */
int paramid = lfirsti(subplan->setParam);
int paramid = lfirsti(subplan->setParam);
ParamExecData *prm = &(econtext->ecxt_param_exec_vals[paramid]);

Assert(astate != NULL);
@ -1036,7 +1041,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
* ----------------------------------------------------------------
*/
void
ExecEndSubPlan(SubPlanState *node)
ExecEndSubPlan(SubPlanState * node)
{
if (node->needShutdown)
{
@ -1056,10 +1061,10 @@ ExecEndSubPlan(SubPlanState *node)
* Mark an initplan as needing recalculation
*/
void
ExecReScanSetParamPlan(SubPlanState *node, PlanState *parent)
ExecReScanSetParamPlan(SubPlanState * node, PlanState * parent)
{
PlanState *planstate = node->planstate;
SubPlan *subplan = (SubPlan *) node->xprstate.expr;
SubPlan *subplan = (SubPlan *) node->xprstate.expr;
EState *estate = parent->state;
List *lst;

@ -1080,7 +1085,7 @@ ExecReScanSetParamPlan(SubPlanState *node, PlanState *parent)
*/
foreach(lst, subplan->setParam)
{
int paramid = lfirsti(lst);
int paramid = lfirsti(lst);
ParamExecData *prm = &(estate->es_param_exec_vals[paramid]);

prm->execPlan = node;

@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.18 2003/02/09 00:30:39 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.19 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -160,10 +160,11 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate)
Assert(rte->rtekind == RTE_SUBQUERY);

/*
* The subquery needs its own EState because it has its own rangetable.
* It shares our Param ID space, however. XXX if rangetable access were
* done differently, the subquery could share our EState, which would
* eliminate some thrashing about in this module...
* The subquery needs its own EState because it has its own
* rangetable. It shares our Param ID space, however. XXX if
* rangetable access were done differently, the subquery could share
* our EState, which would eliminate some thrashing about in this
* module...
*/
sp_estate = CreateExecutorState();
subquerystate->sss_SubEState = sp_estate;
@ -259,9 +260,9 @@ ExecSubqueryReScan(SubqueryScanState *node, ExprContext *exprCtxt)

/*
* ExecReScan doesn't know about my subplan, so I have to do
* changed-parameter signaling myself. This is just as well,
* because the subplan has its own memory context in which its
* chgParam state lives.
* changed-parameter signaling myself. This is just as well, because
* the subplan has its own memory context in which its chgParam state
* lives.
*/
if (node->ss.ps.chgParam != NULL)
UpdateChangedParamSet(node->subplan, node->ss.ps.chgParam);

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeUnique.c,v 1.38 2003/02/02 19:08:57 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeUnique.c,v 1.39 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -57,9 +57,9 @@ ExecUnique(UniqueState *node)
* now loop, returning only non-duplicate tuples. We assume that the
* tuples arrive in sorted order so we can detect duplicates easily.
*
* We return the first tuple from each group of duplicates (or the
* last tuple of each group, when moving backwards). At either end
* of the subplan, clear priorTuple so that we correctly return the
* We return the first tuple from each group of duplicates (or the last
* tuple of each group, when moving backwards). At either end of the
* subplan, clear priorTuple so that we correctly return the
* first/last tuple when reversing direction.
*/
for (;;)

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/spi.c,v 1.99 2003/07/21 17:05:10 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/spi.c,v 1.100 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -95,8 +95,8 @@ SPI_connect(void)
/*
* Create memory contexts for this procedure
*
* XXX it would be better to use PortalContext as the parent context,
* but we may not be inside a portal (consider deferred-trigger
* XXX it would be better to use PortalContext as the parent context, but
* we may not be inside a portal (consider deferred-trigger
* execution).
*/
_SPI_current->procCxt = AllocSetContextCreate(TopTransactionContext,
@ -799,7 +799,7 @@ SPI_cursor_open(const char *name, void *plan, Datum *Values, const char *Nulls)
*/
PortalDefineQuery(portal,
NULL, /* unfortunately don't have sourceText */
"SELECT", /* cursor's query is always a SELECT */
"SELECT", /* cursor's query is always a SELECT */
makeList1(queryTree),
makeList1(planTree),
PortalGetHeapMemory(portal));
@ -1007,9 +1007,9 @@ _SPI_execute(const char *src, int tcount, _SPI_plan *plan)
/*
* Do parse analysis and rule rewrite for each raw parsetree.
*
* We save the querytrees from each raw parsetree as a separate
* sublist. This allows _SPI_execute_plan() to know where the
* boundaries between original queries fall.
* We save the querytrees from each raw parsetree as a separate sublist.
* This allows _SPI_execute_plan() to know where the boundaries
* between original queries fall.
*/
query_list_list = NIL;
plan_list = NIL;
@ -1136,8 +1136,8 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, const char *Nulls,

foreach(query_list_list_item, query_list_list)
{
List *query_list = lfirst(query_list_list_item);
List *query_list_item;
List *query_list = lfirst(query_list_list_item);
List *query_list_item;

/* Reset state for each original parsetree */
/* (at most one of its querytrees will be marked canSetTag) */
@ -1148,7 +1148,7 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, const char *Nulls,

foreach(query_list_item, query_list)
{
Query *queryTree = (Query *) lfirst(query_list_item);
Query *queryTree = (Query *) lfirst(query_list_item);
Plan *planTree;
QueryDesc *qdesc;
DestReceiver *dest;
@ -1190,10 +1190,10 @@ _SPI_pquery(QueryDesc *queryDesc, bool runit, int tcount)
{
case CMD_SELECT:
res = SPI_OK_SELECT;
if (queryDesc->parsetree->into != NULL) /* select into table */
if (queryDesc->parsetree->into != NULL) /* select into table */
{
res = SPI_OK_SELINTO;
queryDesc->dest = None_Receiver; /* don't output results */
queryDesc->dest = None_Receiver; /* don't output results */
}
break;
case CMD_INSERT:
@ -1351,7 +1351,7 @@ _SPI_checktuples(void)
SPITupleTable *tuptable = _SPI_current->tuptable;
bool failed = false;

if (tuptable == NULL) /* spi_dest_startup was not called */
if (tuptable == NULL) /* spi_dest_startup was not called */
failed = true;
else if (processed != (tuptable->alloced - tuptable->free))
failed = true;
@ -1372,7 +1372,8 @@ _SPI_copy_plan(_SPI_plan *plan, int location)
parentcxt = _SPI_current->procCxt;
else if (location == _SPI_CPLAN_TOPCXT)
parentcxt = TopMemoryContext;
else /* (this case not currently used) */
else
/* (this case not currently used) */
parentcxt = CurrentMemoryContext;

/*

@ -9,7 +9,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/tstoreReceiver.c,v 1.6 2003/05/08 18:16:36 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/tstoreReceiver.c,v 1.7 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -21,10 +21,10 @@

typedef struct
{
DestReceiver pub;
Tuplestorestate *tstore;
MemoryContext cxt;
} TStoreState;
DestReceiver pub;
Tuplestorestate *tstore;
MemoryContext cxt;
} TStoreState;


/*

@ -9,7 +9,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Id: stringinfo.c,v 1.34 2003/04/24 21:16:43 tgl Exp $
* $Id: stringinfo.c,v 1.35 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -62,7 +62,7 @@ initStringInfo(StringInfo str)
* strcat.
*/
void
appendStringInfo(StringInfo str, const char *fmt, ...)
appendStringInfo(StringInfo str, const char *fmt,...)
{
for (;;)
{
@ -86,7 +86,7 @@ appendStringInfo(StringInfo str, const char *fmt, ...)
* appendStringInfoVA
*
* Attempt to format text data under the control of fmt (an sprintf-style
* format string) and append it to whatever is already in str. If successful
* format string) and append it to whatever is already in str. If successful
* return true; if not (because there's not enough space), return false
* without modifying str. Typically the caller would enlarge str and retry
* on false return --- see appendStringInfo for standard usage pattern.
@ -113,9 +113,9 @@ appendStringInfoVA(StringInfo str, const char *fmt, va_list args)
return false;

/*
* Assert check here is to catch buggy vsnprintf that overruns
* the specified buffer length. Solaris 7 in 64-bit mode is
* an example of a platform with such a bug.
* Assert check here is to catch buggy vsnprintf that overruns the
* specified buffer length. Solaris 7 in 64-bit mode is an example of
* a platform with such a bug.
*/
#ifdef USE_ASSERT_CHECKING
str->data[str->maxlen - 1] = '\0';

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.108 2003/07/28 06:27:06 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.109 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -221,8 +221,8 @@ pg_krb5_init(void)
if (retval)
{
ereport(LOG,
(errmsg("kerberos sname_to_principal(\"%s\") returned error %d",
PG_KRB_SRVNAM, retval)));
(errmsg("kerberos sname_to_principal(\"%s\") returned error %d",
PG_KRB_SRVNAM, retval)));
com_err("postgres", retval,
"while getting server principal for service \"%s\"",
PG_KRB_SRVNAM);
@ -432,7 +432,7 @@ ClientAuthentication(Port *port)
* out the less clueful good guys.
*/
{
char hostinfo[NI_MAXHOST];
char hostinfo[NI_MAXHOST];

getnameinfo_all(&port->raddr.addr, port->raddr.salen,
hostinfo, sizeof(hostinfo),
@ -441,15 +441,15 @@ ClientAuthentication(Port *port)

#ifdef USE_SSL
ereport(FATAL,
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\", %s",
hostinfo, port->user_name, port->database_name,
port->ssl ? gettext("SSL on") : gettext("SSL off"))));
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\", %s",
hostinfo, port->user_name, port->database_name,
port->ssl ? gettext("SSL on") : gettext("SSL off"))));
#else
ereport(FATAL,
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\"",
hostinfo, port->user_name, port->database_name)));
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\"",
hostinfo, port->user_name, port->database_name)));
#endif
break;
}
@ -460,7 +460,7 @@ ClientAuthentication(Port *port)
|| port->laddr.addr.ss_family != AF_INET)
ereport(FATAL,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("kerberos 4 only supports IPv4 connections")));
errmsg("kerberos 4 only supports IPv4 connections")));
sendAuthRequest(port, AUTH_REQ_KRB4);
status = pg_krb4_recvauth(port);
break;
@ -492,7 +492,7 @@ ClientAuthentication(Port *port)
if (setsockopt(port->sock, 0, LOCAL_CREDS, &on, sizeof(on)) < 0)
ereport(FATAL,
(errcode_for_socket_access(),
errmsg("failed to enable credential receipt: %m")));
errmsg("failed to enable credential receipt: %m")));
}
#endif
if (port->raddr.addr.ss_family == AF_UNIX)
@ -755,22 +755,22 @@ recv_password_packet(Port *port)
if (PG_PROTOCOL_MAJOR(port->proto) >= 3)
{
/* Expect 'p' message type */
int mtype;
int mtype;

mtype = pq_getbyte();
if (mtype != 'p')
{
/*
* If the client just disconnects without offering a password,
* don't make a log entry. This is legal per protocol spec and
* in fact commonly done by psql, so complaining just clutters
* the log.
* don't make a log entry. This is legal per protocol spec
* and in fact commonly done by psql, so complaining just
* clutters the log.
*/
if (mtype != EOF)
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("expected password response, got msg type %d",
mtype)));
errmsg("expected password response, got msg type %d",
mtype)));
return NULL; /* EOF or bad message type */
}
}
@ -782,7 +782,7 @@ recv_password_packet(Port *port)
}

initStringInfo(&buf);
if (pq_getmessage(&buf, 1000)) /* receive password */
if (pq_getmessage(&buf, 1000)) /* receive password */
{
/* EOF - pq_getmessage already logged a suitable message */
pfree(buf.data);
@ -804,7 +804,7 @@ recv_password_packet(Port *port)
(errmsg("received password packet")));

/*
* Return the received string. Note we do not attempt to do any
* Return the received string. Note we do not attempt to do any
* character-set conversion on it; since we don't yet know the
* client's encoding, there wouldn't be much point.
*/

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/libpq/be-fsstubs.c,v 1.66 2003/07/28 00:09:15 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/be-fsstubs.c,v 1.67 2003/08/04 00:43:18 momjian Exp $
*
* NOTES
* This should be moved to a more appropriate place. It is here
@ -372,7 +372,7 @@ lo_import(PG_FUNCTION_ARGS)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to use server-side lo_import()"),
errmsg("must be superuser to use server-side lo_import()"),
errhint("Anyone can use the client-side lo_import() provided by libpq.")));
#endif

@ -439,7 +439,7 @@ lo_export(PG_FUNCTION_ARGS)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to use server-side lo_export()"),
errmsg("must be superuser to use server-side lo_export()"),
errhint("Anyone can use the client-side lo_export() provided by libpq.")));
#endif

@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/libpq/be-secure.c,v 1.37 2003/07/27 21:49:53 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/be-secure.c,v 1.38 2003/08/04 00:43:18 momjian Exp $
*
* Since the server static private key ($DataDir/server.key)
* will normally be stored unencrypted so that the database
@ -187,7 +187,6 @@ OvOzKGtwcTqO/1wV5gKkzu1ZVswVUQd5Gg8lJicwqRWyyNRczDDoG9jVDxmogKTH\n\
AaqLulO7R8Ifa1SwF2DteSGVtgWEN8gDpN3RBmmPTDngyF2DHb5qmpnznwtFKdTL\n\
KWbuHn491xNO25CQWMtem80uKw+pTnisBRF/454n1Jnhub144YRBoN8CAQI=\n\
-----END DH PARAMETERS-----\n";

#endif

/* ------------------------------------------------------------ */
@ -258,7 +257,7 @@ secure_read(Port *port, void *ptr, size_t len)
#ifdef USE_SSL
if (port->ssl)
{
rloop:
rloop:
n = SSL_read(port->ssl, ptr, len);
switch (SSL_get_error(port->ssl, n))
{
@ -328,7 +327,7 @@ secure_write(Port *port, void *ptr, size_t len)
if (port->ssl->state != SSL_ST_OK)
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("SSL failed to send renegotiation request")));
errmsg("SSL failed to send renegotiation request")));
port->ssl->state |= SSL_ST_ACCEPT;
SSL_do_handshake(port->ssl);
if (port->ssl->state != SSL_ST_OK)
@ -338,7 +337,7 @@ secure_write(Port *port, void *ptr, size_t len)
port->count = 0;
}

wloop:
wloop:
n = SSL_write(port->ssl, ptr, len);
switch (SSL_get_error(port->ssl, n))
{
@ -436,7 +435,7 @@ load_dh_file(int keylength)
(codes & DH_CHECK_P_NOT_SAFE_PRIME))
{
elog(LOG,
"DH error (%s): neither suitable generator or safe prime",
"DH error (%s): neither suitable generator or safe prime",
fnbuf);
return NULL;
}
@ -620,21 +619,21 @@ initialize_SSL(void)
if (!SSL_CTX_use_certificate_file(SSL_context, fnbuf, SSL_FILETYPE_PEM))
ereport(FATAL,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("could not load server certificate file \"%s\": %s",
fnbuf, SSLerrmessage())));
errmsg("could not load server certificate file \"%s\": %s",
fnbuf, SSLerrmessage())));

snprintf(fnbuf, sizeof(fnbuf), "%s/server.key", DataDir);
if (stat(fnbuf, &buf) == -1)
ereport(FATAL,
(errcode_for_file_access(),
errmsg("could not access private key file \"%s\": %m",
fnbuf)));
errmsg("could not access private key file \"%s\": %m",
fnbuf)));
if (!S_ISREG(buf.st_mode) || (buf.st_mode & (S_IRWXG | S_IRWXO)) ||
buf.st_uid != getuid())
ereport(FATAL,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("unsafe permissions on private key file \"%s\"",
fnbuf),
errmsg("unsafe permissions on private key file \"%s\"",
fnbuf),
errdetail("File must be owned by the database user and must have no permissions for \"group\" or \"other\".")));

if (!SSL_CTX_use_PrivateKey_file(SSL_context, fnbuf, SSL_FILETYPE_PEM))

@ -9,7 +9,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/libpq/crypt.c,v 1.54 2003/07/22 19:00:10 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/crypt.c,v 1.55 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -119,7 +119,10 @@ md5_crypt_verify(const Port *port, const char *user, char *client_pass)
default:
if (isMD5(shadow_pass))
{
/* Encrypt user-supplied password to match MD5 in pg_shadow */
/*
* Encrypt user-supplied password to match MD5 in
* pg_shadow
*/
crypt_client_pass = palloc(MD5_PASSWD_LEN + 1);
if (!EncryptMD5(client_pass,
port->user_name,

@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/libpq/hba.c,v 1.109 2003/08/01 23:24:28 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/hba.c,v 1.110 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -392,7 +392,7 @@ get_group_line(const char *group)
/*
* Lookup a user name in the pg_shadow file
*/
List **
List **
get_user_line(const char *user)
{
return (List **) bsearch((void *) user,
@ -416,7 +416,7 @@ check_group(char *group, char *user)
{
foreach(l, lnext(lnext(*line)))
if (strcmp(lfirst(l), user) == 0)
return true;
return true;
}

return false;
@ -547,13 +547,14 @@ static void
parse_hba(List *line, hbaPort *port, bool *found_p, bool *error_p)
{
int line_number;
char *token;
char *db;
char *user;
struct addrinfo *file_ip_addr = NULL, *file_ip_mask = NULL;
struct addrinfo hints;
struct sockaddr_storage *mask;
char *cidr_slash;
char *token;
char *db;
char *user;
struct addrinfo *file_ip_addr = NULL,
*file_ip_mask = NULL;
struct addrinfo hints;
struct sockaddr_storage *mask;
char *cidr_slash;
int ret;

Assert(line != NIL);
@ -595,11 +596,11 @@ parse_hba(List *line, hbaPort *port, bool *found_p, bool *error_p)
return;
}
else if (strcmp(token, "host") == 0
|| strcmp(token, "hostssl") == 0
|| strcmp(token, "hostnossl") == 0)
|| strcmp(token, "hostssl") == 0
|| strcmp(token, "hostnossl") == 0)
{

if (token[4] == 's') /* "hostssl" */
if (token[4] == 's') /* "hostssl" */
{
#ifdef USE_SSL
/* Record does not match if we are not on an SSL connection */
@ -616,7 +617,7 @@ parse_hba(List *line, hbaPort *port, bool *found_p, bool *error_p)
#endif
}
#ifdef USE_SSL
else if (token[4] == 'n') /* "hostnossl" */
else if (token[4] == 'n') /* "hostnossl" */
{
/* Record does not match if we are on an SSL connection */
if (port->ssl)
@ -643,7 +644,7 @@ parse_hba(List *line, hbaPort *port, bool *found_p, bool *error_p)
token = lfirst(line);

/* Check if it has a CIDR suffix and if so isolate it */
cidr_slash = strchr(token,'/');
cidr_slash = strchr(token, '/');
if (cidr_slash)
*cidr_slash = '\0';

@ -698,7 +699,7 @@ parse_hba(List *line, hbaPort *port, bool *found_p, bool *error_p)
if (ret || !file_ip_mask)
goto hba_syntax;

mask = (struct sockaddr_storage *)file_ip_mask->ai_addr;
mask = (struct sockaddr_storage *) file_ip_mask->ai_addr;

if (file_ip_addr->ai_family != mask->ss_family)
goto hba_syntax;
@ -714,7 +715,7 @@ parse_hba(List *line, hbaPort *port, bool *found_p, bool *error_p)

/* Must meet network restrictions */
if (!rangeSockAddr(&port->raddr.addr,
(struct sockaddr_storage *)file_ip_addr->ai_addr,
(struct sockaddr_storage *) file_ip_addr->ai_addr,
mask))
goto hba_freeaddr;

@ -743,8 +744,8 @@ hba_syntax:
else
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("missing field in pg_hba.conf file at end of line %d",
line_number)));
errmsg("missing field in pg_hba.conf file at end of line %d",
line_number)));

*error_p = true;

@ -1012,8 +1013,8 @@ ident_syntax:
else
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("missing entry in pg_ident.conf file at end of line %d",
line_number)));
errmsg("missing entry in pg_ident.conf file at end of line %d",
line_number)));

*error_p = true;
}
@ -1044,7 +1045,7 @@ check_ident_usermap(const char *usermap_name,
{
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("cannot use IDENT authentication without usermap field")));
errmsg("cannot use IDENT authentication without usermap field")));
found_entry = false;
}
else if (strcmp(usermap_name, "sameuser") == 0)
@ -1215,11 +1216,13 @@ ident_inet(const SockAddr remote_addr,
char ident_port[NI_MAXSERV];
char ident_query[80];
char ident_response[80 + IDENT_USERNAME_MAX];
struct addrinfo *ident_serv = NULL, *la = NULL, hints;
struct addrinfo *ident_serv = NULL,
*la = NULL,
hints;

/*
* Might look a little weird to first convert it to text and
* then back to sockaddr, but it's protocol independent.
* Might look a little weird to first convert it to text and then back
* to sockaddr, but it's protocol independent.
*/
getnameinfo_all(&remote_addr.addr, remote_addr.salen,
remote_addr_s, sizeof(remote_addr_s),
@ -1254,22 +1257,23 @@ ident_inet(const SockAddr remote_addr,
rc = getaddrinfo_all(local_addr_s, NULL, &hints, &la);
if (rc || !la)
return false; /* we don't expect this to happen */


sock_fd = socket(ident_serv->ai_family, ident_serv->ai_socktype,
ident_serv->ai_protocol);
if (sock_fd < 0)
{
ereport(LOG,
(errcode_for_socket_access(),
errmsg("could not create socket for IDENT connection: %m")));
errmsg("could not create socket for IDENT connection: %m")));
ident_return = false;
goto ident_inet_done;
}

/*
* Bind to the address which the client originally contacted,
* otherwise the ident server won't be able to match up the right
* connection. This is necessary if the PostgreSQL server is
* running on an IP alias.
* connection. This is necessary if the PostgreSQL server is running
* on an IP alias.
*/
rc = bind(sock_fd, la->ai_addr, la->ai_addrlen);
if (rc != 0)
@ -1282,7 +1286,7 @@ ident_inet(const SockAddr remote_addr,
goto ident_inet_done;
}

rc = connect(sock_fd, ident_serv->ai_addr,
rc = connect(sock_fd, ident_serv->ai_addr,
ident_serv->ai_addrlen);
if (rc != 0)
{
@ -1354,12 +1358,12 @@ ident_unix(int sock, char *ident_user)
{
#if defined(HAVE_GETPEEREID)
/* OpenBSD style: */
uid_t uid;
gid_t gid;
uid_t uid;
gid_t gid;
struct passwd *pass;

errno = 0;
if (getpeereid(sock,&uid,&gid) != 0)
if (getpeereid(sock, &uid, &gid) != 0)
{
/* We didn't get a valid credentials struct. */
ereport(LOG,
@ -1491,8 +1495,7 @@ ident_unix(int sock, char *ident_user)
return false;
#endif
}

#endif /* HAVE_UNIX_SOCKETS */
#endif /* HAVE_UNIX_SOCKETS */


/*

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/libpq/ip.c,v 1.17 2003/08/01 17:53:41 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/ip.c,v 1.18 2003/08/04 00:43:18 momjian Exp $
*
* This file and the IPV6 implementation were initially provided by
* Nigel Kukard <nkukard@lbsd.net>, Linux Based Systems Design
@ -34,30 +34,30 @@
#endif
#include <arpa/inet.h>
#include <sys/file.h>

#endif

#include "libpq/ip.h"


static int rangeSockAddrAF_INET(const struct sockaddr_in *addr,
const struct sockaddr_in *netaddr,
const struct sockaddr_in *netmask);
static int rangeSockAddrAF_INET(const struct sockaddr_in * addr,
const struct sockaddr_in * netaddr,
const struct sockaddr_in * netmask);

#ifdef HAVE_IPV6
static int rangeSockAddrAF_INET6(const struct sockaddr_in6 *addr,
const struct sockaddr_in6 *netaddr,
const struct sockaddr_in6 *netmask);
static int rangeSockAddrAF_INET6(const struct sockaddr_in6 * addr,
const struct sockaddr_in6 * netaddr,
const struct sockaddr_in6 * netmask);
#endif

#ifdef HAVE_UNIX_SOCKETS
static int getaddrinfo_unix(const char *path,
const struct addrinfo *hintsp,
struct addrinfo **result);
static int getaddrinfo_unix(const char *path,
const struct addrinfo * hintsp,
struct addrinfo ** result);

static int getnameinfo_unix(const struct sockaddr_un *sa, int salen,
char *node, int nodelen,
char *service, int servicelen,
int flags);
static int getnameinfo_unix(const struct sockaddr_un * sa, int salen,
char *node, int nodelen,
char *service, int servicelen,
int flags);
#endif


@ -66,7 +66,7 @@ static int getnameinfo_unix(const struct sockaddr_un *sa, int salen,
*/
int
getaddrinfo_all(const char *hostname, const char *servname,
const struct addrinfo *hintp, struct addrinfo **result)
const struct addrinfo * hintp, struct addrinfo ** result)
{
#ifdef HAVE_UNIX_SOCKETS
if (hintp != NULL && hintp->ai_family == AF_UNIX)
@ -89,7 +89,7 @@ getaddrinfo_all(const char *hostname, const char *servname,
* not safe to look at ai_family in the addrinfo itself.
*/
void
freeaddrinfo_all(int hint_ai_family, struct addrinfo *ai)
freeaddrinfo_all(int hint_ai_family, struct addrinfo * ai)
{
#ifdef HAVE_UNIX_SOCKETS
if (hint_ai_family == AF_UNIX)
@ -123,12 +123,12 @@ freeaddrinfo_all(int hint_ai_family, struct addrinfo *ai)
* guaranteed to be filled with something even on failure return.
*/
int
getnameinfo_all(const struct sockaddr_storage *addr, int salen,
getnameinfo_all(const struct sockaddr_storage * addr, int salen,
char *node, int nodelen,
char *service, int servicelen,
int flags)
{
int rc;
int rc;

#ifdef HAVE_UNIX_SOCKETS
if (addr && addr->ss_family == AF_UNIX)
@ -166,8 +166,8 @@ getnameinfo_all(const struct sockaddr_storage *addr, int salen,
* -------
*/
static int
getaddrinfo_unix(const char *path, const struct addrinfo *hintsp,
struct addrinfo **result)
getaddrinfo_unix(const char *path, const struct addrinfo * hintsp,
struct addrinfo ** result)
{
struct addrinfo hints;
struct addrinfo *aip;
@ -178,9 +178,7 @@ getaddrinfo_unix(const char *path, const struct addrinfo *hintsp,
MemSet(&hints, 0, sizeof(hints));

if (strlen(path) >= sizeof(unp->sun_path))
{
return EAI_FAIL;
}

if (hintsp == NULL)
{
@ -234,139 +232,123 @@ getaddrinfo_unix(const char *path, const struct addrinfo *hintsp,
* Convert an address to a hostname.
*/
static int
getnameinfo_unix(const struct sockaddr_un *sa, int salen,
getnameinfo_unix(const struct sockaddr_un * sa, int salen,
char *node, int nodelen,
char *service, int servicelen,
int flags)
{
int ret = -1;
int ret = -1;

/* Invalid arguments. */
if (sa == NULL || sa->sun_family != AF_UNIX ||
(node == NULL && service == NULL))
{
return EAI_FAIL;
}

/* We don't support those. */
if ((node && !(flags & NI_NUMERICHOST))
|| (service && !(flags & NI_NUMERICSERV)))
{
return EAI_FAIL;
}

if (node)
{
ret = snprintf(node, nodelen, "%s", "localhost");
if (ret == -1 || ret > nodelen)
{
return EAI_MEMORY;
}
}

if (service)
{
ret = snprintf(service, servicelen, "%s", sa->sun_path);
if (ret == -1 || ret > servicelen)
{
return EAI_MEMORY;
}
}

return 0;
}

#endif /* HAVE_UNIX_SOCKETS */


int
rangeSockAddr(const struct sockaddr_storage *addr,
const struct sockaddr_storage *netaddr,
const struct sockaddr_storage *netmask)
rangeSockAddr(const struct sockaddr_storage * addr,
const struct sockaddr_storage * netaddr,
const struct sockaddr_storage * netmask)
{
if (addr->ss_family == AF_INET)
return rangeSockAddrAF_INET((struct sockaddr_in *)addr,
(struct sockaddr_in *)netaddr,
(struct sockaddr_in *)netmask);
return rangeSockAddrAF_INET((struct sockaddr_in *) addr,
(struct sockaddr_in *) netaddr,
(struct sockaddr_in *) netmask);
#ifdef HAVE_IPV6
else if (addr->ss_family == AF_INET6)
return rangeSockAddrAF_INET6((struct sockaddr_in6 *)addr,
(struct sockaddr_in6 *)netaddr,
(struct sockaddr_in6 *)netmask);
return rangeSockAddrAF_INET6((struct sockaddr_in6 *) addr,
(struct sockaddr_in6 *) netaddr,
(struct sockaddr_in6 *) netmask);
#endif
else
return 0;
}

/*
* SockAddr_cidr_mask - make a network mask of the appropriate family
* and required number of significant bits
* SockAddr_cidr_mask - make a network mask of the appropriate family
* and required number of significant bits
*
* Note: Returns a static pointer for the mask, so it's not thread safe,
* and a second call will overwrite the data.
* and a second call will overwrite the data.
*/
int
SockAddr_cidr_mask(struct sockaddr_storage **mask, char *numbits, int family)
SockAddr_cidr_mask(struct sockaddr_storage ** mask, char *numbits, int family)
{
long bits;
char *endptr;
static struct sockaddr_storage sock;
struct sockaddr_in mask4;
long bits;
char *endptr;
static struct sockaddr_storage sock;
struct sockaddr_in mask4;

#ifdef HAVE_IPV6
struct sockaddr_in6 mask6;
struct sockaddr_in6 mask6;
#endif

bits = strtol(numbits, &endptr, 10);

if (*numbits == '\0' || *endptr != '\0')
{
return -1;
}

if ((bits < 0) || (family == AF_INET && bits > 32)
#ifdef HAVE_IPV6
|| (family == AF_INET6 && bits > 128)
#endif
)
{
return -1;
}

*mask = &sock;

switch (family)
{
case AF_INET:
mask4.sin_addr.s_addr =
mask4.sin_addr.s_addr =
htonl((0xffffffffUL << (32 - bits))
& 0xffffffffUL);
memcpy(&sock, &mask4, sizeof(mask4));
& 0xffffffffUL);
memcpy(&sock, &mask4, sizeof(mask4));
break;
#ifdef HAVE_IPV6
case AF_INET6:
{
int i;

for (i = 0; i < 16; i++)
{
if (bits <= 0)
int i;

for (i = 0; i < 16; i++)
{
mask6.sin6_addr.s6_addr[i] = 0;
if (bits <= 0)
mask6.sin6_addr.s6_addr[i] = 0;
else if (bits >= 8)
mask6.sin6_addr.s6_addr[i] = 0xff;
else
{
mask6.sin6_addr.s6_addr[i] =
(0xff << (8 - bits)) & 0xff;
}
bits -= 8;
}
else if (bits >= 8)
{
mask6.sin6_addr.s6_addr[i] = 0xff;
}
else
{
mask6.sin6_addr.s6_addr[i] =
(0xff << (8 - bits)) & 0xff;
}
bits -= 8;
memcpy(&sock, &mask6, sizeof(mask6));
break;
}
memcpy(&sock, &mask6, sizeof(mask6));
break;
}
#endif
default:
return -1;
@ -377,8 +359,8 @@ static struct sockaddr_storage sock;
}

static int
rangeSockAddrAF_INET(const struct sockaddr_in *addr, const struct sockaddr_in *netaddr,
const struct sockaddr_in *netmask)
rangeSockAddrAF_INET(const struct sockaddr_in * addr, const struct sockaddr_in * netaddr,
const struct sockaddr_in * netmask)
{
if (((addr->sin_addr.s_addr ^ netaddr->sin_addr.s_addr) &
netmask->sin_addr.s_addr) == 0)
@ -390,9 +372,9 @@ rangeSockAddrAF_INET(const struct sockaddr_in *addr, const struct sockaddr_in *n

#ifdef HAVE_IPV6
static int
rangeSockAddrAF_INET6(const struct sockaddr_in6 *addr,
const struct sockaddr_in6 *netaddr,
const struct sockaddr_in6 *netmask)
rangeSockAddrAF_INET6(const struct sockaddr_in6 * addr,
const struct sockaddr_in6 * netaddr,
const struct sockaddr_in6 * netmask)
{
int i;

@ -405,6 +387,5 @@ rangeSockAddrAF_INET6(const struct sockaddr_in6 *addr,

return 1;
}

#endif

@ -14,7 +14,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/libpq/md5.c,v 1.19 2002/10/03 17:09:41 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/md5.c,v 1.20 2003/08/04 00:43:18 momjian Exp $
*/


@ -35,8 +35,8 @@
#include "postgres_fe.h"
#ifndef WIN32
#include "libpq/crypt.h"
#endif /* WIN32 */
#endif /* FRONTEND */
#endif /* WIN32 */
#endif /* FRONTEND */

#ifdef MD5_ODBC
#include "md5.h"

@ -30,7 +30,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/libpq/pqcomm.c,v 1.161 2003/07/27 21:49:53 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/pqcomm.c,v 1.162 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -151,6 +151,7 @@ pq_close(void)
{
/* Cleanly shut down SSL layer */
secure_close(MyProcPort);

/*
* Formerly we did an explicit close() here, but it seems better
* to leave the socket open until the process dies. This allows
@ -208,10 +209,11 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
int maxconn;
int one = 1;
int ret;
char portNumberStr[64];
char *service;
struct addrinfo *addrs = NULL, *addr;
struct addrinfo hint;
char portNumberStr[64];
char *service;
struct addrinfo *addrs = NULL,
*addr;
struct addrinfo hint;
int listen_index = 0;
int added = 0;

@ -245,8 +247,8 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
hostName, service, gai_strerror(ret))));
else
ereport(LOG,
(errmsg("could not translate service \"%s\" to address: %s",
service, gai_strerror(ret))));
(errmsg("could not translate service \"%s\" to address: %s",
service, gai_strerror(ret))));
freeaddrinfo_all(hint.ai_family, addrs);
return STATUS_ERROR;
}
@ -255,9 +257,9 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
{
if (!IS_AF_UNIX(family) && IS_AF_UNIX(addr->ai_family))
{
/* Only set up a unix domain socket when
* they really asked for it. The service/port
* is different in that case.
/*
* Only set up a unix domain socket when they really asked for
* it. The service/port is different in that case.
*/
continue;
}
@ -285,7 +287,7 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
if (!IS_AF_UNIX(addr->ai_family))
{
if ((setsockopt(fd, SOL_SOCKET, SO_REUSEADDR,
(char *) &one, sizeof(one))) == -1)
(char *) &one, sizeof(one))) == -1)
{
ereport(LOG,
(errcode_for_socket_access(),
@ -299,7 +301,7 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
if (addr->ai_family == AF_INET6)
{
if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY,
(char *)&one, sizeof(one)) == -1)
(char *) &one, sizeof(one)) == -1)
{
ereport(LOG,
(errcode_for_socket_access(),
@ -311,10 +313,10 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
#endif

/*
* Note: This might fail on some OS's, like Linux
* older than 2.4.21-pre3, that don't have the IPV6_V6ONLY
* socket option, and map ipv4 addresses to ipv6. It will
* show ::ffff:ipv4 for all ipv4 connections.
* Note: This might fail on some OS's, like Linux older than
* 2.4.21-pre3, that don't have the IPV6_V6ONLY socket option, and
* map ipv4 addresses to ipv6. It will show ::ffff:ipv4 for all
* ipv4 connections.
*/
err = bind(fd, addr->ai_addr, addr->ai_addrlen);
if (err < 0)
@ -323,12 +325,12 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
(errcode_for_socket_access(),
errmsg("failed to bind server socket: %m"),
(IS_AF_UNIX(addr->ai_family)) ?
errhint("Is another postmaster already running on port %d?"
" If not, remove socket node \"%s\" and retry.",
(int) portNumber, sock_path) :
errhint("Is another postmaster already running on port %d?"
" If not, wait a few seconds and retry.",
(int) portNumber)));
errhint("Is another postmaster already running on port %d?"
" If not, remove socket node \"%s\" and retry.",
(int) portNumber, sock_path) :
errhint("Is another postmaster already running on port %d?"
" If not, wait a few seconds and retry.",
(int) portNumber)));
closesocket(fd);
continue;
}
@ -345,10 +347,10 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
#endif

/*
* Select appropriate accept-queue length limit. PG_SOMAXCONN
* is only intended to provide a clamp on the request on
* platforms where an overly large request provokes a kernel
* error (are there any?).
* Select appropriate accept-queue length limit. PG_SOMAXCONN is
* only intended to provide a clamp on the request on platforms
* where an overly large request provokes a kernel error (are
* there any?).
*/
maxconn = MaxBackends * 2;
if (maxconn > PG_SOMAXCONN)
@ -465,7 +467,6 @@ Setup_AF_UNIX(void)
}
return STATUS_OK;
}

#endif /* HAVE_UNIX_SOCKETS */


@ -485,8 +486,8 @@ StreamConnection(int server_fd, Port *port)
/* accept connection and fill in the client (remote) address */
port->raddr.salen = sizeof(port->raddr.addr);
if ((port->sock = accept(server_fd,
(struct sockaddr *) &port->raddr.addr,
&port->raddr.salen)) < 0)
(struct sockaddr *) & port->raddr.addr,
&port->raddr.salen)) < 0)
{
ereport(LOG,
(errcode_for_socket_access(),
@ -495,6 +496,7 @@ StreamConnection(int server_fd, Port *port)
}

#ifdef SCO_ACCEPT_BUG

/*
* UnixWare 7+ and OpenServer 5.0.4 are known to have this bug, but it
* shouldn't hurt to catch it for all versions of those platforms.
@ -571,19 +573,19 @@ TouchSocketFile(void)
if (sock_path[0] != '\0')
{
/*
* utime() is POSIX standard, utimes() is a common alternative.
* If we have neither, there's no way to affect the mod or access
* utime() is POSIX standard, utimes() is a common alternative. If
* we have neither, there's no way to affect the mod or access
* time of the socket :-(
*
* In either path, we ignore errors; there's no point in complaining.
*/
#ifdef HAVE_UTIME
utime(sock_path, NULL);
#else /* !HAVE_UTIME */
#else /* !HAVE_UTIME */
#ifdef HAVE_UTIMES
utimes(sock_path, NULL);
#endif /* HAVE_UTIMES */
#endif /* HAVE_UTIME */
#endif /* HAVE_UTIMES */
#endif /* HAVE_UTIME */
}
}

@ -634,9 +636,10 @@ pq_recvbuf(void)
continue; /* Ok if interrupted */

/*
* Careful: an ereport() that tries to write to the client would
* cause recursion to here, leading to stack overflow and core
* dump! This message must go *only* to the postmaster log.
* Careful: an ereport() that tries to write to the client
* would cause recursion to here, leading to stack overflow
* and core dump! This message must go *only* to the
* postmaster log.
*/
ereport(COMMERROR,
(errcode_for_socket_access(),
@ -646,8 +649,8 @@ pq_recvbuf(void)
if (r == 0)
{
/*
* EOF detected. We used to write a log message here, but it's
* better to expect the ultimate caller to do that.
* EOF detected. We used to write a log message here, but
* it's better to expect the ultimate caller to do that.
*/
return EOF;
}
@ -894,9 +897,10 @@ pq_flush(void)
continue; /* Ok if we were interrupted */

/*
* Careful: an ereport() that tries to write to the client would
* cause recursion to here, leading to stack overflow and core
* dump! This message must go *only* to the postmaster log.
* Careful: an ereport() that tries to write to the client
* would cause recursion to here, leading to stack overflow
* and core dump! This message must go *only* to the
* postmaster log.
*
* If a client disconnects while we're in the midst of output, we
* might write quite a bit of data before we get to a safe

@ -24,7 +24,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/libpq/pqformat.c,v 1.32 2003/07/22 19:00:10 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/pqformat.c,v 1.33 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -58,12 +58,12 @@
* pq_getmsgbyte - get a raw byte from a message buffer
* pq_getmsgint - get a binary integer from a message buffer
* pq_getmsgint64 - get a binary 8-byte int from a message buffer
* pq_getmsgfloat4 - get a float4 from a message buffer
* pq_getmsgfloat8 - get a float8 from a message buffer
* pq_getmsgfloat4 - get a float4 from a message buffer
* pq_getmsgfloat8 - get a float8 from a message buffer
* pq_getmsgbytes - get raw data from a message buffer
* pq_copymsgbytes - copy raw data from a message buffer
* pq_copymsgbytes - copy raw data from a message buffer
* pq_getmsgtext - get a counted text string (with conversion)
* pq_getmsgstring - get a null-terminated text string (with conversion)
* pq_getmsgstring - get a null-terminated text string (with conversion)
* pq_getmsgend - verify message fully consumed
*/

@ -90,10 +90,12 @@ void
pq_beginmessage(StringInfo buf, char msgtype)
{
initStringInfo(buf);

/*
* We stash the message type into the buffer's cursor field, expecting
* that the pq_sendXXX routines won't touch it. We could alternatively
* make it the first byte of the buffer contents, but this seems easier.
* that the pq_sendXXX routines won't touch it. We could
* alternatively make it the first byte of the buffer contents, but
* this seems easier.
*/
buf->cursor = msgtype;
}
@ -122,7 +124,7 @@ pq_sendbytes(StringInfo buf, const char *data, int datalen)
* pq_sendcountedtext - append a counted text string (with character set conversion)
*
* The data sent to the frontend by this routine is a 4-byte count field
* followed by the string. The count includes itself or not, as per the
* followed by the string. The count includes itself or not, as per the
* countincludesself flag (pre-3.0 protocol requires it to include itself).
* The passed text string need not be null-terminated, and the data sent
* to the frontend isn't either.
@ -173,9 +175,7 @@ pq_sendtext(StringInfo buf, const char *str, int slen)
pfree(p);
}
else
{
appendBinaryStringInfo(buf, str, slen);
}
}

/* --------------------------------
@ -200,9 +200,7 @@ pq_sendstring(StringInfo buf, const char *str)
pfree(p);
}
else
{
appendBinaryStringInfo(buf, str, slen + 1);
}
}

/* --------------------------------
@ -281,9 +279,9 @@ pq_sendfloat4(StringInfo buf, float4 f)
{
union
{
float4 f;
uint32 i;
} swap;
float4 f;
uint32 i;
} swap;

swap.f = f;
swap.i = htonl(swap.i);
@ -308,9 +306,9 @@ pq_sendfloat8(StringInfo buf, float8 f)
#ifdef INT64_IS_BUSTED
union
{
float8 f;
uint32 h[2];
} swap;
float8 f;
uint32 h[2];
} swap;

swap.f = f;
swap.h[0] = htonl(swap.h[0]);
@ -332,9 +330,9 @@ pq_sendfloat8(StringInfo buf, float8 f)
#else
union
{
float8 f;
int64 i;
} swap;
float8 f;
int64 i;
} swap;

swap.f = f;
pq_sendint64(buf, swap.i);
@ -515,7 +513,7 @@ pq_getmsgint64(StringInfo msg)
}

/* --------------------------------
* pq_getmsgfloat4 - get a float4 from a message buffer
* pq_getmsgfloat4 - get a float4 from a message buffer
*
* See notes for pq_sendfloat4.
* --------------------------------
@ -525,16 +523,16 @@ pq_getmsgfloat4(StringInfo msg)
{
union
{
float4 f;
uint32 i;
} swap;
float4 f;
uint32 i;
} swap;

swap.i = pq_getmsgint(msg, 4);
return swap.f;
}

/* --------------------------------
* pq_getmsgfloat8 - get a float8 from a message buffer
* pq_getmsgfloat8 - get a float8 from a message buffer
*
* See notes for pq_sendfloat8.
* --------------------------------
@ -545,9 +543,9 @@ pq_getmsgfloat8(StringInfo msg)
#ifdef INT64_IS_BUSTED
union
{
float8 f;
uint32 h[2];
} swap;
float8 f;
uint32 h[2];
} swap;

/* Have to figure out endianness by testing... */
if (((uint32) 1) == htonl((uint32) 1))
@ -566,9 +564,9 @@ pq_getmsgfloat8(StringInfo msg)
#else
union
{
float8 f;
int64 i;
} swap;
float8 f;
int64 i;
} swap;

swap.i = pq_getmsgint64(msg);
return swap.f;
@ -597,7 +595,7 @@ pq_getmsgbytes(StringInfo msg, int datalen)
}

/* --------------------------------
* pq_copymsgbytes - copy raw data from a message buffer
* pq_copymsgbytes - copy raw data from a message buffer
*
* Same as above, except data is copied to caller's buffer.
* --------------------------------
@ -623,8 +621,8 @@ pq_copymsgbytes(StringInfo msg, char *buf, int datalen)
char *
pq_getmsgtext(StringInfo msg, int rawbytes, int *nbytes)
{
char *str;
char *p;
char *str;
char *p;

if (rawbytes < 0 || rawbytes > (msg->len - msg->cursor))
ereport(ERROR,
@ -635,9 +633,7 @@ pq_getmsgtext(StringInfo msg, int rawbytes, int *nbytes)

p = (char *) pg_client_to_server((unsigned char *) str, rawbytes);
if (p != str) /* actual conversion has been done? */
{
*nbytes = strlen(p);
}
else
{
p = (char *) palloc(rawbytes + 1);
@ -649,7 +645,7 @@ pq_getmsgtext(StringInfo msg, int rawbytes, int *nbytes)
}

/* --------------------------------
* pq_getmsgstring - get a null-terminated text string (with conversion)
* pq_getmsgstring - get a null-terminated text string (with conversion)
*
* May return a pointer directly into the message buffer, or a pointer
* to a palloc'd conversion result.
@ -658,14 +654,15 @@ pq_getmsgtext(StringInfo msg, int rawbytes, int *nbytes)
const char *
pq_getmsgstring(StringInfo msg)
{
char *str;
int slen;
char *str;
int slen;

str = &msg->data[msg->cursor];

/*
* It's safe to use strlen() here because a StringInfo is guaranteed
* to have a trailing null byte. But check we found a null inside
* the message.
* to have a trailing null byte. But check we found a null inside the
* message.
*/
slen = strlen(str);
if (msg->cursor + slen >= msg->len)

@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/main/main.c,v 1.59 2003/07/27 21:49:53 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/main/main.c,v 1.60 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -163,6 +163,7 @@ main(int argc, char *argv[])
{
#ifndef WIN32
#ifndef __BEOS__

/*
* Make sure we are not running as root.
*
@ -175,8 +176,8 @@ main(int argc, char *argv[])
gettext("\"root\" execution of the PostgreSQL server is not permitted.\n"
"The server must be started under an unprivileged user id to prevent\n"
"possible system security compromise. See the documentation for\n"
"more information on how to properly start the server.\n"
));
"more information on how to properly start the server.\n"
));
exit(1);
}
#endif /* !__BEOS__ */
@ -193,16 +194,16 @@ main(int argc, char *argv[])
if (getuid() != geteuid())
{
fprintf(stderr,
gettext("%s: real and effective user ids must match\n"),
gettext("%s: real and effective user ids must match\n"),
argv[0]);
exit(1);
}
#endif /* !WIN32 */
#endif /* !WIN32 */
}

/*
* Now dispatch to one of PostmasterMain, PostgresMain, GucInfoMain, or
* BootstrapMain depending on the program name (and possibly first
* Now dispatch to one of PostmasterMain, PostgresMain, GucInfoMain,
* or BootstrapMain depending on the program name (and possibly first
* argument) we were called with. The lack of consistency here is
* historical.
*/
@ -223,8 +224,8 @@ main(int argc, char *argv[])

/*
* If the first argument is "--help-config", then invoke runtime
* configuration option display mode.
* We remove "--help-config" from the arguments passed on to GucInfoMain.
* configuration option display mode. We remove "--help-config" from
* the arguments passed on to GucInfoMain.
*/
if (argc > 1 && strcmp(new_argv[1], "--help-config") == 0)
exit(GucInfoMain(argc - 1, new_argv + 1));
@ -246,7 +247,7 @@ main(int argc, char *argv[])
pw_name_persist = strdup(pw->pw_name);
#else
{
long namesize = 256 /* UNLEN */ + 1;
long namesize = 256 /* UNLEN */ + 1;

pw_name_persist = malloc(namesize);
if (!GetUserName(pw_name_persist, &namesize))

@ -14,7 +14,7 @@
* Copyright (c) 2003, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/nodes/bitmapset.c,v 1.3 2003/07/22 23:30:37 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/bitmapset.c,v 1.4 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -38,7 +38,7 @@
* where x's are unspecified bits. The two's complement negative is formed
* by inverting all the bits and adding one. Inversion gives
* yyyyyy01111
* where each y is the inverse of the corresponding x. Incrementing gives
* where each y is the inverse of the corresponding x. Incrementing gives
* yyyyyy10000
* and then ANDing with the original value gives
* 00000010000
@ -65,41 +65,41 @@
*/

static const uint8 rightmost_one_pos[256] = {
0, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
0, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
};

static const uint8 number_of_ones[256] = {
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
};


@ -107,7 +107,7 @@ static const uint8 number_of_ones[256] = {
* bms_copy - make a palloc'd copy of a bitmapset
*/
Bitmapset *
bms_copy(const Bitmapset *a)
bms_copy(const Bitmapset * a)
{
Bitmapset *result;
size_t size;
@ -127,7 +127,7 @@ bms_copy(const Bitmapset *a)
* be reported as equal to a palloc'd value containing no members.
*/
bool
bms_equal(const Bitmapset *a, const Bitmapset *b)
bms_equal(const Bitmapset * a, const Bitmapset * b)
{
const Bitmapset *shorter;
const Bitmapset *longer;
@ -143,9 +143,7 @@ bms_equal(const Bitmapset *a, const Bitmapset *b)
return bms_is_empty(b);
}
else if (b == NULL)
{
return bms_is_empty(a);
}
/* Identify shorter and longer input */
if (a->nwords <= b->nwords)
{
@ -199,7 +197,7 @@ bms_make_singleton(int x)
* Same as pfree except for allowing NULL input
*/
void
bms_free(Bitmapset *a)
bms_free(Bitmapset * a)
{
if (a)
pfree(a);
@ -216,7 +214,7 @@ bms_free(Bitmapset *a)
* bms_union - set union
*/
Bitmapset *
bms_union(const Bitmapset *a, const Bitmapset *b)
bms_union(const Bitmapset * a, const Bitmapset * b)
{
Bitmapset *result;
const Bitmapset *other;
@ -242,9 +240,7 @@ bms_union(const Bitmapset *a, const Bitmapset *b)
/* And union the shorter input into the result */
otherlen = other->nwords;
for (i = 0; i < otherlen; i++)
{
result->words[i] |= other->words[i];
}
return result;
}

@ -252,7 +248,7 @@ bms_union(const Bitmapset *a, const Bitmapset *b)
* bms_intersect - set intersection
*/
Bitmapset *
bms_intersect(const Bitmapset *a, const Bitmapset *b)
bms_intersect(const Bitmapset * a, const Bitmapset * b)
{
Bitmapset *result;
const Bitmapset *other;
@ -276,9 +272,7 @@ bms_intersect(const Bitmapset *a, const Bitmapset *b)
/* And intersect the longer input with the result */
resultlen = result->nwords;
for (i = 0; i < resultlen; i++)
{
result->words[i] &= other->words[i];
}
return result;
}

@ -286,7 +280,7 @@ bms_intersect(const Bitmapset *a, const Bitmapset *b)
* bms_difference - set difference (ie, A without members of B)
*/
Bitmapset *
bms_difference(const Bitmapset *a, const Bitmapset *b)
bms_difference(const Bitmapset * a, const Bitmapset * b)
{
Bitmapset *result;
int shortlen;
@ -302,9 +296,7 @@ bms_difference(const Bitmapset *a, const Bitmapset *b)
/* And remove b's bits from result */
shortlen = Min(a->nwords, b->nwords);
for (i = 0; i < shortlen; i++)
{
result->words[i] &= ~ b->words[i];
}
result->words[i] &= ~b->words[i];
return result;
}

@ -312,7 +304,7 @@ bms_difference(const Bitmapset *a, const Bitmapset *b)
* bms_is_subset - is A a subset of B?
*/
bool
bms_is_subset(const Bitmapset *a, const Bitmapset *b)
bms_is_subset(const Bitmapset * a, const Bitmapset * b)
{
int shortlen;
int longlen;
@ -327,7 +319,7 @@ bms_is_subset(const Bitmapset *a, const Bitmapset *b)
shortlen = Min(a->nwords, b->nwords);
for (i = 0; i < shortlen; i++)
{
if ((a->words[i] & ~ b->words[i]) != 0)
if ((a->words[i] & ~b->words[i]) != 0)
return false;
}
/* Check extra words */
@ -347,7 +339,7 @@ bms_is_subset(const Bitmapset *a, const Bitmapset *b)
* bms_is_member - is X a member of A?
*/
bool
bms_is_member(int x, const Bitmapset *a)
bms_is_member(int x, const Bitmapset * a)
{
int wordnum,
bitnum;
@ -370,7 +362,7 @@ bms_is_member(int x, const Bitmapset *a)
* bms_overlap - do sets overlap (ie, have a nonempty intersection)?
*/
bool
bms_overlap(const Bitmapset *a, const Bitmapset *b)
bms_overlap(const Bitmapset * a, const Bitmapset * b)
{
int shortlen;
int i;
@ -392,7 +384,7 @@ bms_overlap(const Bitmapset *a, const Bitmapset *b)
* bms_nonempty_difference - do sets have a nonempty difference?
*/
bool
bms_nonempty_difference(const Bitmapset *a, const Bitmapset *b)
bms_nonempty_difference(const Bitmapset * a, const Bitmapset * b)
{
int shortlen;
int i;
@ -406,7 +398,7 @@ bms_nonempty_difference(const Bitmapset *a, const Bitmapset *b)
shortlen = Min(a->nwords, b->nwords);
for (i = 0; i < shortlen; i++)
{
if ((a->words[i] & ~ b->words[i]) != 0)
if ((a->words[i] & ~b->words[i]) != 0)
return true;
}
/* Check extra words in a */
@ -424,11 +416,11 @@ bms_nonempty_difference(const Bitmapset *a, const Bitmapset *b)
* Raises error if |a| is not 1.
*/
int
bms_singleton_member(const Bitmapset *a)
bms_singleton_member(const Bitmapset * a)
{
int result = -1;
int nwords;
int wordnum;
int result = -1;
int nwords;
int wordnum;

if (a == NULL)
elog(ERROR, "bitmapset is empty");
@ -459,11 +451,11 @@ bms_singleton_member(const Bitmapset *a)
* bms_num_members - count members of set
*/
int
bms_num_members(const Bitmapset *a)
bms_num_members(const Bitmapset * a)
{
int result = 0;
int nwords;
int wordnum;
int result = 0;
int nwords;
int wordnum;

if (a == NULL)
return 0;
@ -488,11 +480,11 @@ bms_num_members(const Bitmapset *a)
* This is faster than making an exact count with bms_num_members().
*/
BMS_Membership
bms_membership(const Bitmapset *a)
bms_membership(const Bitmapset * a)
{
BMS_Membership result = BMS_EMPTY_SET;
int nwords;
int wordnum;
int nwords;
int wordnum;

if (a == NULL)
return BMS_EMPTY_SET;
@ -517,10 +509,10 @@ bms_membership(const Bitmapset *a)
* This is even faster than bms_membership().
*/
bool
bms_is_empty(const Bitmapset *a)
bms_is_empty(const Bitmapset * a)
{
int nwords;
int wordnum;
int nwords;
int wordnum;

if (a == NULL)
return true;
@ -552,7 +544,7 @@ bms_is_empty(const Bitmapset *a)
* Input set is modified or recycled!
*/
Bitmapset *
bms_add_member(Bitmapset *a, int x)
bms_add_member(Bitmapset * a, int x)
{
int wordnum,
bitnum;
@ -573,9 +565,7 @@ bms_add_member(Bitmapset *a, int x)
result = bms_make_singleton(x);
nwords = a->nwords;
for (i = 0; i < nwords; i++)
{
result->words[i] |= a->words[i];
}
pfree(a);
return result;
}
@ -592,7 +582,7 @@ bms_add_member(Bitmapset *a, int x)
* Input set is modified in-place!
*/
Bitmapset *
bms_del_member(Bitmapset *a, int x)
bms_del_member(Bitmapset * a, int x)
{
int wordnum,
bitnum;
@ -604,9 +594,7 @@ bms_del_member(Bitmapset *a, int x)
wordnum = WORDNUM(x);
bitnum = BITNUM(x);
if (wordnum < a->nwords)
{
a->words[wordnum] &= ~ ((bitmapword) 1 << bitnum);
}
a->words[wordnum] &= ~((bitmapword) 1 << bitnum);
return a;
}

@ -614,7 +602,7 @@ bms_del_member(Bitmapset *a, int x)
* bms_add_members - like bms_union, but left input is recycled
*/
Bitmapset *
bms_add_members(Bitmapset *a, const Bitmapset *b)
bms_add_members(Bitmapset * a, const Bitmapset * b)
{
Bitmapset *result;
const Bitmapset *other;
@ -640,9 +628,7 @@ bms_add_members(Bitmapset *a, const Bitmapset *b)
/* And union the shorter input into the result */
otherlen = other->nwords;
for (i = 0; i < otherlen; i++)
{
result->words[i] |= other->words[i];
}
if (result != a)
pfree(a);
return result;
@ -652,7 +638,7 @@ bms_add_members(Bitmapset *a, const Bitmapset *b)
* bms_int_members - like bms_intersect, but left input is recycled
*/
Bitmapset *
bms_int_members(Bitmapset *a, const Bitmapset *b)
bms_int_members(Bitmapset * a, const Bitmapset * b)
{
int shortlen;
int i;
@ -668,13 +654,9 @@ bms_int_members(Bitmapset *a, const Bitmapset *b)
/* Intersect b into a; we need never copy */
shortlen = Min(a->nwords, b->nwords);
for (i = 0; i < shortlen; i++)
{
a->words[i] &= b->words[i];
}
for (; i < a->nwords; i++)
{
a->words[i] = 0;
}
return a;
}

@ -682,7 +664,7 @@ bms_int_members(Bitmapset *a, const Bitmapset *b)
* bms_del_members - like bms_difference, but left input is recycled
*/
Bitmapset *
bms_del_members(Bitmapset *a, const Bitmapset *b)
bms_del_members(Bitmapset * a, const Bitmapset * b)
{
int shortlen;
int i;
@ -695,9 +677,7 @@ bms_del_members(Bitmapset *a, const Bitmapset *b)
/* Remove b's bits from a; we need never copy */
shortlen = Min(a->nwords, b->nwords);
for (i = 0; i < shortlen; i++)
{
a->words[i] &= ~ b->words[i];
}
a->words[i] &= ~b->words[i];
return a;
}

@ -705,7 +685,7 @@ bms_del_members(Bitmapset *a, const Bitmapset *b)
* bms_join - like bms_union, but *both* inputs are recycled
*/
Bitmapset *
bms_join(Bitmapset *a, Bitmapset *b)
bms_join(Bitmapset * a, Bitmapset * b)
{
Bitmapset *result;
Bitmapset *other;
@ -731,9 +711,7 @@ bms_join(Bitmapset *a, Bitmapset *b)
/* And union the shorter input into the result */
otherlen = other->nwords;
for (i = 0; i < otherlen; i++)
{
result->words[i] |= other->words[i];
}
if (other != result) /* pure paranoia */
pfree(other);
return result;
@ -742,24 +720,22 @@ bms_join(Bitmapset *a, Bitmapset *b)
/*----------
* bms_first_member - find and remove first member of a set
*
* Returns -1 if set is empty. NB: set is destructively modified!
* Returns -1 if set is empty. NB: set is destructively modified!
*
* This is intended as support for iterating through the members of a set.
* The typical pattern is
*
* tmpset = bms_copy(inputset);
* while ((x = bms_first_member(tmpset)) >= 0)
* {
* process member x;
* }
* bms_free(tmpset);
*----------
*/
int
bms_first_member(Bitmapset *a)
bms_first_member(Bitmapset * a)
{
int nwords;
int wordnum;
int nwords;
int wordnum;

if (a == NULL)
return -1;
@ -770,10 +746,10 @@ bms_first_member(Bitmapset *a)

if (w != 0)
{
int result;
int result;

w = RIGHTMOST_ONE(w);
a->words[wordnum] &= ~ w;
a->words[wordnum] &= ~w;

result = wordnum * BITS_PER_BITMAPWORD;
while ((w & 255) == 0)

@ -4,7 +4,7 @@
* Copy functions for Postgres tree nodes.
*
* NOTE: we currently support copying all node types found in parse and
* plan trees. We do not support copying executor state trees; there
* plan trees. We do not support copying executor state trees; there
* is no need for that, and no point in maintaining all the code that
* would be needed. We also do not support copying Path trees, mainly
* because the circular linkages between RelOptInfo and Path nodes can't
@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/nodes/copyfuncs.c,v 1.260 2003/07/22 23:30:37 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/copyfuncs.c,v 1.261 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -30,7 +30,7 @@

/*
* Macros to simplify copying of different kinds of fields. Use these
* wherever possible to reduce the chance for silly typos. Note that these
* wherever possible to reduce the chance for silly typos. Note that these
* hard-wire the convention that the local variables in a Copy routine are
* named 'newnode' and 'from'.
*/
@ -639,7 +639,7 @@ _copyRangeVar(RangeVar *from)

/*
* We don't need a _copyExpr because Expr is an abstract supertype which
* should never actually get instantiated. Also, since it has no common
* should never actually get instantiated. Also, since it has no common
* fields except NodeTag, there's no need for a helper routine to factor
* out copying the common fields...
*/
@ -755,9 +755,9 @@ _copyArrayRef(ArrayRef *from)
* _copyFuncExpr
*/
static FuncExpr *
_copyFuncExpr(FuncExpr *from)
_copyFuncExpr(FuncExpr * from)
{
FuncExpr *newnode = makeNode(FuncExpr);
FuncExpr *newnode = makeNode(FuncExpr);

COPY_SCALAR_FIELD(funcid);
COPY_SCALAR_FIELD(funcresulttype);
@ -772,7 +772,7 @@ _copyFuncExpr(FuncExpr *from)
* _copyOpExpr
*/
static OpExpr *
_copyOpExpr(OpExpr *from)
_copyOpExpr(OpExpr * from)
{
OpExpr *newnode = makeNode(OpExpr);

@ -789,9 +789,9 @@ _copyOpExpr(OpExpr *from)
* _copyDistinctExpr (same as OpExpr)
*/
static DistinctExpr *
_copyDistinctExpr(DistinctExpr *from)
_copyDistinctExpr(DistinctExpr * from)
{
DistinctExpr *newnode = makeNode(DistinctExpr);
DistinctExpr *newnode = makeNode(DistinctExpr);

COPY_SCALAR_FIELD(opno);
COPY_SCALAR_FIELD(opfuncid);
@ -806,9 +806,9 @@ _copyDistinctExpr(DistinctExpr *from)
* _copyScalarArrayOpExpr
*/
static ScalarArrayOpExpr *
_copyScalarArrayOpExpr(ScalarArrayOpExpr *from)
_copyScalarArrayOpExpr(ScalarArrayOpExpr * from)
{
ScalarArrayOpExpr *newnode = makeNode(ScalarArrayOpExpr);
ScalarArrayOpExpr *newnode = makeNode(ScalarArrayOpExpr);

COPY_SCALAR_FIELD(opno);
COPY_SCALAR_FIELD(opfuncid);
@ -822,9 +822,9 @@ _copyScalarArrayOpExpr(ScalarArrayOpExpr *from)
* _copyBoolExpr
*/
static BoolExpr *
_copyBoolExpr(BoolExpr *from)
_copyBoolExpr(BoolExpr * from)
{
BoolExpr *newnode = makeNode(BoolExpr);
BoolExpr *newnode = makeNode(BoolExpr);

COPY_SCALAR_FIELD(boolop);
COPY_NODE_FIELD(args);
@ -940,9 +940,9 @@ _copyCaseWhen(CaseWhen *from)
* _copyArrayExpr
*/
static ArrayExpr *
_copyArrayExpr(ArrayExpr *from)
_copyArrayExpr(ArrayExpr * from)
{
ArrayExpr *newnode = makeNode(ArrayExpr);
ArrayExpr *newnode = makeNode(ArrayExpr);

COPY_SCALAR_FIELD(array_typeid);
COPY_SCALAR_FIELD(element_typeid);
@ -956,7 +956,7 @@ _copyArrayExpr(ArrayExpr *from)
* _copyCoalesceExpr
*/
static CoalesceExpr *
_copyCoalesceExpr(CoalesceExpr *from)
_copyCoalesceExpr(CoalesceExpr * from)
{
CoalesceExpr *newnode = makeNode(CoalesceExpr);

@ -970,9 +970,9 @@ _copyCoalesceExpr(CoalesceExpr *from)
* _copyNullIfExpr (same as OpExpr)
*/
static NullIfExpr *
_copyNullIfExpr(NullIfExpr *from)
_copyNullIfExpr(NullIfExpr * from)
{
NullIfExpr *newnode = makeNode(NullIfExpr);
NullIfExpr *newnode = makeNode(NullIfExpr);

COPY_SCALAR_FIELD(opno);
COPY_SCALAR_FIELD(opfuncid);
@ -1015,7 +1015,7 @@ _copyBooleanTest(BooleanTest *from)
* _copyCoerceToDomain
*/
static CoerceToDomain *
_copyCoerceToDomain(CoerceToDomain *from)
_copyCoerceToDomain(CoerceToDomain * from)
{
CoerceToDomain *newnode = makeNode(CoerceToDomain);

@ -1031,7 +1031,7 @@ _copyCoerceToDomain(CoerceToDomain *from)
* _copyCoerceToDomainValue
*/
static CoerceToDomainValue *
_copyCoerceToDomainValue(CoerceToDomainValue *from)
_copyCoerceToDomainValue(CoerceToDomainValue * from)
{
CoerceToDomainValue *newnode = makeNode(CoerceToDomainValue);

@ -1045,7 +1045,7 @@ _copyCoerceToDomainValue(CoerceToDomainValue *from)
* _copySetToDefault
*/
static SetToDefault *
_copySetToDefault(SetToDefault *from)
_copySetToDefault(SetToDefault * from)
{
SetToDefault *newnode = makeNode(SetToDefault);

@ -1148,7 +1148,7 @@ _copyRestrictInfo(RestrictInfo *from)

COPY_NODE_FIELD(clause);
COPY_SCALAR_FIELD(ispusheddown);
COPY_NODE_FIELD(subclauseindices); /* XXX probably bad */
COPY_NODE_FIELD(subclauseindices); /* XXX probably bad */
COPY_SCALAR_FIELD(eval_cost);
COPY_SCALAR_FIELD(this_selec);
COPY_BITMAPSET_FIELD(left_relids);
@ -1191,7 +1191,7 @@ _copyJoinInfo(JoinInfo *from)
* _copyInClauseInfo
*/
static InClauseInfo *
_copyInClauseInfo(InClauseInfo *from)
_copyInClauseInfo(InClauseInfo * from)
{
InClauseInfo *newnode = makeNode(InClauseInfo);

@ -1532,9 +1532,9 @@ _copyQuery(Query *from)

/*
* We do not copy the other planner internal fields: base_rel_list,
* other_rel_list, join_rel_list, equi_key_list, query_pathkeys.
* That would get us into copying RelOptInfo/Path trees, which we don't
* want to do. It is necessary to copy in_info_list and hasJoinRTEs
* other_rel_list, join_rel_list, equi_key_list, query_pathkeys. That
* would get us into copying RelOptInfo/Path trees, which we don't
* want to do. It is necessary to copy in_info_list and hasJoinRTEs
* for the benefit of inheritance_planner(), which may try to copy a
* Query in which these are already set.
*/
@ -1633,7 +1633,7 @@ _copyAlterTableStmt(AlterTableStmt *from)
}

static AlterDomainStmt *
_copyAlterDomainStmt(AlterDomainStmt *from)
_copyAlterDomainStmt(AlterDomainStmt * from)
{
AlterDomainStmt *newnode = makeNode(AlterDomainStmt);

@ -1644,7 +1644,7 @@ _copyAlterDomainStmt(AlterDomainStmt *from)
COPY_SCALAR_FIELD(behavior);

return newnode;
}
}

static GrantStmt *
_copyGrantStmt(GrantStmt *from)
@ -1685,7 +1685,7 @@ _copyFuncWithArgs(FuncWithArgs *from)
}

static DeclareCursorStmt *
_copyDeclareCursorStmt(DeclareCursorStmt *from)
_copyDeclareCursorStmt(DeclareCursorStmt * from)
{
DeclareCursorStmt *newnode = makeNode(DeclareCursorStmt);

@ -1747,7 +1747,7 @@ _copyCreateStmt(CreateStmt *from)
}

static InhRelation *
_copyInhRelation(InhRelation *from)
_copyInhRelation(InhRelation * from)
{
InhRelation *newnode = makeNode(InhRelation);

@ -2118,7 +2118,7 @@ _copyCreateSeqStmt(CreateSeqStmt *from)
}

static AlterSeqStmt *
_copyAlterSeqStmt(AlterSeqStmt *from)
_copyAlterSeqStmt(AlterSeqStmt * from)
{
AlterSeqStmt *newnode = makeNode(AlterSeqStmt);

@ -2171,7 +2171,7 @@ _copyCreateTrigStmt(CreateTrigStmt *from)
COPY_NODE_FIELD(args);
COPY_SCALAR_FIELD(before);
COPY_SCALAR_FIELD(row);
strcpy(newnode->actions, from->actions); /* in-line string field */
strcpy(newnode->actions, from->actions); /* in-line string field */
COPY_SCALAR_FIELD(isconstraint);
COPY_SCALAR_FIELD(deferrable);
COPY_SCALAR_FIELD(initdeferred);

@ -11,14 +11,14 @@
|
||||
* be handled easily in a simple depth-first traversal.
|
||||
*
|
||||
* Currently, in fact, equal() doesn't know how to compare Plan trees
|
||||
* either. This might need to be fixed someday.
|
||||
* either. This might need to be fixed someday.
|
||||
*
|
||||
*
|
||||
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/nodes/equalfuncs.c,v 1.204 2003/07/28 00:09:15 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/nodes/equalfuncs.c,v 1.205 2003/08/04 00:43:19 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -32,8 +32,8 @@
|
||||
|
||||
|
||||
/*
|
||||
* Macros to simplify comparison of different kinds of fields. Use these
|
||||
* wherever possible to reduce the chance for silly typos. Note that these
|
||||
* Macros to simplify comparison of different kinds of fields. Use these
|
||||
* wherever possible to reduce the chance for silly typos. Note that these
|
||||
* hard-wire the convention that the local variables in an Equal routine are
|
||||
* named 'a' and 'b'.
|
||||
*/
|
||||
@ -135,7 +135,7 @@ _equalRangeVar(RangeVar *a, RangeVar *b)
|
||||
|
||||
/*
|
||||
* We don't need an _equalExpr because Expr is an abstract supertype which
|
||||
* should never actually get instantiated. Also, since it has no common
|
||||
* should never actually get instantiated. Also, since it has no common
|
||||
* fields except NodeTag, there's no need for a helper routine to factor
|
||||
* out comparing the common fields...
|
||||
*/
|
||||
@ -224,11 +224,12 @@ _equalArrayRef(ArrayRef *a, ArrayRef *b)
|
||||
}
|
||||
|
||||
static bool
|
||||
_equalFuncExpr(FuncExpr *a, FuncExpr *b)
|
||||
_equalFuncExpr(FuncExpr * a, FuncExpr * b)
|
||||
{
|
||||
COMPARE_SCALAR_FIELD(funcid);
|
||||
COMPARE_SCALAR_FIELD(funcresulttype);
|
||||
COMPARE_SCALAR_FIELD(funcretset);
|
||||
|
||||
/*
|
||||
* Special-case COERCE_DONTCARE, so that pathkeys can build coercion
|
||||
* nodes that are equal() to both explicit and implicit coercions.
@ -244,14 +245,15 @@ _equalFuncExpr(FuncExpr *a, FuncExpr *b)
}

static bool
_equalOpExpr(OpExpr *a, OpExpr *b)
_equalOpExpr(OpExpr * a, OpExpr * b)
{
	COMPARE_SCALAR_FIELD(opno);

	/*
	 * Special-case opfuncid: it is allowable for it to differ if one
	 * node contains zero and the other doesn't. This just means that the
	 * one node isn't as far along in the parse/plan pipeline and hasn't
	 * had the opfuncid cache filled yet.
	 * Special-case opfuncid: it is allowable for it to differ if one node
	 * contains zero and the other doesn't. This just means that the one
	 * node isn't as far along in the parse/plan pipeline and hasn't had
	 * the opfuncid cache filled yet.
	 */
	if (a->opfuncid != b->opfuncid &&
		a->opfuncid != 0 &&
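
The hunk truncates the condition; the remaining lines are unchanged context. For reference, a sketch of the complete test (a reconstruction, not part of this diff): equality fails only when both nodes have a filled opfuncid cache and the values disagree:

	if (a->opfuncid != b->opfuncid &&
		a->opfuncid != 0 &&
		b->opfuncid != 0)
		return false;

The same reflowed comment and the same test recur below in _equalDistinctExpr, _equalScalarArrayOpExpr, and _equalNullIfExpr.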
@ -266,14 +268,15 @@ _equalOpExpr(OpExpr *a, OpExpr *b)
}

static bool
_equalDistinctExpr(DistinctExpr *a, DistinctExpr *b)
_equalDistinctExpr(DistinctExpr * a, DistinctExpr * b)
{
	COMPARE_SCALAR_FIELD(opno);

	/*
	 * Special-case opfuncid: it is allowable for it to differ if one
	 * node contains zero and the other doesn't. This just means that the
	 * one node isn't as far along in the parse/plan pipeline and hasn't
	 * had the opfuncid cache filled yet.
	 * Special-case opfuncid: it is allowable for it to differ if one node
	 * contains zero and the other doesn't. This just means that the one
	 * node isn't as far along in the parse/plan pipeline and hasn't had
	 * the opfuncid cache filled yet.
	 */
	if (a->opfuncid != b->opfuncid &&
		a->opfuncid != 0 &&
@ -288,14 +291,15 @@ _equalDistinctExpr(DistinctExpr *a, DistinctExpr *b)
}

static bool
_equalScalarArrayOpExpr(ScalarArrayOpExpr *a, ScalarArrayOpExpr *b)
_equalScalarArrayOpExpr(ScalarArrayOpExpr * a, ScalarArrayOpExpr * b)
{
	COMPARE_SCALAR_FIELD(opno);

	/*
	 * Special-case opfuncid: it is allowable for it to differ if one
	 * node contains zero and the other doesn't. This just means that the
	 * one node isn't as far along in the parse/plan pipeline and hasn't
	 * had the opfuncid cache filled yet.
	 * Special-case opfuncid: it is allowable for it to differ if one node
	 * contains zero and the other doesn't. This just means that the one
	 * node isn't as far along in the parse/plan pipeline and hasn't had
	 * the opfuncid cache filled yet.
	 */
	if (a->opfuncid != b->opfuncid &&
		a->opfuncid != 0 &&
@ -309,7 +313,7 @@ _equalScalarArrayOpExpr(ScalarArrayOpExpr *a, ScalarArrayOpExpr *b)
}

static bool
_equalBoolExpr(BoolExpr *a, BoolExpr *b)
_equalBoolExpr(BoolExpr * a, BoolExpr * b)
{
	COMPARE_SCALAR_FIELD(boolop);
	COMPARE_NODE_FIELD(args);
@ -366,6 +370,7 @@ _equalRelabelType(RelabelType *a, RelabelType *b)
	COMPARE_NODE_FIELD(arg);
	COMPARE_SCALAR_FIELD(resulttype);
	COMPARE_SCALAR_FIELD(resulttypmod);

	/*
	 * Special-case COERCE_DONTCARE, so that pathkeys can build coercion
	 * nodes that are equal() to both explicit and implicit coercions.
@ -399,7 +404,7 @@ _equalCaseWhen(CaseWhen *a, CaseWhen *b)
}

static bool
_equalArrayExpr(ArrayExpr *a, ArrayExpr *b)
_equalArrayExpr(ArrayExpr * a, ArrayExpr * b)
{
	COMPARE_SCALAR_FIELD(array_typeid);
	COMPARE_SCALAR_FIELD(element_typeid);
@ -410,7 +415,7 @@ _equalArrayExpr(ArrayExpr *a, ArrayExpr *b)
}

static bool
_equalCoalesceExpr(CoalesceExpr *a, CoalesceExpr *b)
_equalCoalesceExpr(CoalesceExpr * a, CoalesceExpr * b)
{
	COMPARE_SCALAR_FIELD(coalescetype);
	COMPARE_NODE_FIELD(args);
@ -419,14 +424,15 @@ _equalCoalesceExpr(CoalesceExpr *a, CoalesceExpr *b)
}

static bool
_equalNullIfExpr(NullIfExpr *a, NullIfExpr *b)
_equalNullIfExpr(NullIfExpr * a, NullIfExpr * b)
{
	COMPARE_SCALAR_FIELD(opno);

	/*
	 * Special-case opfuncid: it is allowable for it to differ if one
	 * node contains zero and the other doesn't. This just means that the
	 * one node isn't as far along in the parse/plan pipeline and hasn't
	 * had the opfuncid cache filled yet.
	 * Special-case opfuncid: it is allowable for it to differ if one node
	 * contains zero and the other doesn't. This just means that the one
	 * node isn't as far along in the parse/plan pipeline and hasn't had
	 * the opfuncid cache filled yet.
	 */
	if (a->opfuncid != b->opfuncid &&
		a->opfuncid != 0 &&
@ -459,11 +465,12 @@ _equalBooleanTest(BooleanTest *a, BooleanTest *b)
}

static bool
_equalCoerceToDomain(CoerceToDomain *a, CoerceToDomain *b)
_equalCoerceToDomain(CoerceToDomain * a, CoerceToDomain * b)
{
	COMPARE_NODE_FIELD(arg);
	COMPARE_SCALAR_FIELD(resulttype);
	COMPARE_SCALAR_FIELD(resulttypmod);

	/*
	 * Special-case COERCE_DONTCARE, so that pathkeys can build coercion
	 * nodes that are equal() to both explicit and implicit coercions.
@ -477,7 +484,7 @@ _equalCoerceToDomain(CoerceToDomain *a, CoerceToDomain *b)
}

static bool
_equalCoerceToDomainValue(CoerceToDomainValue *a, CoerceToDomainValue *b)
_equalCoerceToDomainValue(CoerceToDomainValue * a, CoerceToDomainValue * b)
{
	COMPARE_SCALAR_FIELD(typeId);
	COMPARE_SCALAR_FIELD(typeMod);
@ -486,7 +493,7 @@ _equalCoerceToDomainValue(CoerceToDomainValue *a, CoerceToDomainValue *b)
}

static bool
_equalSetToDefault(SetToDefault *a, SetToDefault *b)
_equalSetToDefault(SetToDefault * a, SetToDefault * b)
{
	COMPARE_SCALAR_FIELD(typeId);
	COMPARE_SCALAR_FIELD(typeMod);
@ -554,11 +561,13 @@ _equalRestrictInfo(RestrictInfo *a, RestrictInfo *b)
{
	COMPARE_NODE_FIELD(clause);
	COMPARE_SCALAR_FIELD(ispusheddown);

	/*
	 * We ignore subclauseindices, eval_cost, this_selec, left/right_relids,
	 * left/right_pathkey, and left/right_bucketsize, since they may not be
	 * set yet, and should be derivable from the clause anyway. Probably it's
	 * not really necessary to compare any of these remaining fields ...
	 * We ignore subclauseindices, eval_cost, this_selec,
	 * left/right_relids, left/right_pathkey, and left/right_bucketsize,
	 * since they may not be set yet, and should be derivable from the
	 * clause anyway. Probably it's not really necessary to compare any
	 * of these remaining fields ...
	 */
	COMPARE_SCALAR_FIELD(mergejoinoperator);
	COMPARE_SCALAR_FIELD(left_sortop);
@ -578,7 +587,7 @@ _equalJoinInfo(JoinInfo *a, JoinInfo *b)
}

static bool
_equalInClauseInfo(InClauseInfo *a, InClauseInfo *b)
_equalInClauseInfo(InClauseInfo * a, InClauseInfo * b)
{
	COMPARE_BITMAPSET_FIELD(lefthand);
	COMPARE_BITMAPSET_FIELD(righthand);
@ -620,9 +629,9 @@ _equalQuery(Query *a, Query *b)

	/*
	 * We do not check the other planner internal fields: base_rel_list,
	 * other_rel_list, join_rel_list, equi_key_list, query_pathkeys.
	 * They might not be set yet, and in any case they should be derivable
	 * from the other fields.
	 * other_rel_list, join_rel_list, equi_key_list, query_pathkeys. They
	 * might not be set yet, and in any case they should be derivable from
	 * the other fields.
	 */
	return true;
}
@ -706,7 +715,7 @@ _equalAlterTableStmt(AlterTableStmt *a, AlterTableStmt *b)
}

static bool
_equalAlterDomainStmt(AlterDomainStmt *a, AlterDomainStmt *b)
_equalAlterDomainStmt(AlterDomainStmt * a, AlterDomainStmt * b)
{
	COMPARE_SCALAR_FIELD(subtype);
	COMPARE_NODE_FIELD(typename);
@ -750,7 +759,7 @@ _equalFuncWithArgs(FuncWithArgs *a, FuncWithArgs *b)
}

static bool
_equalDeclareCursorStmt(DeclareCursorStmt *a, DeclareCursorStmt *b)
_equalDeclareCursorStmt(DeclareCursorStmt * a, DeclareCursorStmt * b)
{
	COMPARE_STRING_FIELD(portalname);
	COMPARE_SCALAR_FIELD(options);
@ -802,7 +811,7 @@ _equalCreateStmt(CreateStmt *a, CreateStmt *b)
}

static bool
_equalInhRelation(InhRelation *a, InhRelation *b)
_equalInhRelation(InhRelation * a, InhRelation * b)
{
	COMPARE_NODE_FIELD(relation);
	COMPARE_SCALAR_FIELD(including_defaults);
@ -1113,7 +1122,7 @@ _equalCreateSeqStmt(CreateSeqStmt *a, CreateSeqStmt *b)
}

static bool
_equalAlterSeqStmt(AlterSeqStmt *a, AlterSeqStmt *b)
_equalAlterSeqStmt(AlterSeqStmt * a, AlterSeqStmt * b)
{
	COMPARE_NODE_FIELD(sequence);
	COMPARE_NODE_FIELD(options);
@ -1156,7 +1165,7 @@ _equalCreateTrigStmt(CreateTrigStmt *a, CreateTrigStmt *b)
	COMPARE_NODE_FIELD(args);
	COMPARE_SCALAR_FIELD(before);
	COMPARE_SCALAR_FIELD(row);
	if (strcmp(a->actions, b->actions) != 0)	/* in-line string field */
	if (strcmp(a->actions, b->actions) != 0)	/* in-line string field */
		return false;
	COMPARE_SCALAR_FIELD(isconstraint);
	COMPARE_SCALAR_FIELD(deferrable);
@ -1400,7 +1409,7 @@ _equalParamRef(ParamRef *a, ParamRef *b)
static bool
_equalAConst(A_Const *a, A_Const *b)
{
	if (!equal(&a->val, &b->val))	/* hack for in-line Value field */
	if (!equal(&a->val, &b->val))	/* hack for in-line Value field */
		return false;
	COMPARE_NODE_FIELD(typename);

@ -1649,9 +1658,9 @@ equal(void *a, void *b)

	switch (nodeTag(a))
	{
			/*
			 * PRIMITIVE NODES
			 */
			/*
			 * PRIMITIVE NODES
			 */
		case T_Resdom:
			retval = _equalResdom(a, b);
			break;
@ -1841,7 +1850,7 @@ equal(void *a, void *b)
			retval = _equalCreateStmt(a, b);
			break;
		case T_InhRelation:
			retval = _equalInhRelation(a,b);
			retval = _equalInhRelation(a, b);
			break;
		case T_DefineStmt:
			retval = _equalDefineStmt(a, b);

@ -9,7 +9,7 @@
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/nodes/list.c,v 1.51 2003/07/22 23:30:37 tgl Exp $
 *	  $Header: /cvsroot/pgsql/src/backend/nodes/list.c,v 1.52 2003/08/04 00:43:19 momjian Exp $
 *
 * NOTES
 *	  XXX a few of the following functions are duplicated to handle
@ -202,7 +202,7 @@ nconc(List *l1, List *l2)
 * since we avoid having to chase down the list again each time.
 */
void
FastAppend(FastList *fl, void *datum)
FastAppend(FastList * fl, void *datum)
{
	List	   *cell = makeList1(datum);

@ -223,7 +223,7 @@ FastAppend(FastList *fl, void *datum)
 * FastAppendi - same for integers
 */
void
FastAppendi(FastList *fl, int datum)
FastAppendi(FastList * fl, int datum)
{
	List	   *cell = makeListi1(datum);

@ -244,7 +244,7 @@ FastAppendi(FastList *fl, int datum)
 * FastAppendo - same for Oids
 */
void
FastAppendo(FastList *fl, Oid datum)
FastAppendo(FastList * fl, Oid datum)
{
	List	   *cell = makeListo1(datum);
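
All three FastAppend variants exist for the reason the FastAppend header comment gives: carrying a tail pointer alongside the head makes each append O(1) instead of re-walking the list every time. A self-contained illustration of the technique in plain C (hypothetical names and types, not PostgreSQL's actual FastList API):

	#include <stdlib.h>

	typedef struct Cell
	{
		void	   *datum;
		struct Cell *next;
	} Cell;

	typedef struct
	{
		Cell	   *head;		/* first cell, or NULL if empty */
		Cell	   *tail;		/* last cell, enabling O(1) appends */
	} TailList;

	static void
	tail_append(TailList *tl, void *datum)
	{
		Cell	   *cell = malloc(sizeof(Cell));	/* error check omitted */

		cell->datum = datum;
		cell->next = NULL;
		if (tl->tail)
			tl->tail->next = cell;	/* link after current tail */
		else
			tl->head = cell;		/* first cell of list */
		tl->tail = cell;			/* tail always points at last cell */
	}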

@ -267,14 +267,12 @@ FastAppendo(FastList *fl, Oid datum)
 * Note that the cells of the second argument are absorbed into the FastList.
 */
void
FastConc(FastList *fl, List *cells)
FastConc(FastList * fl, List *cells)
{
	if (cells == NIL)
		return;					/* nothing to do */
	if (fl->tail)
	{
		lnext(fl->tail) = cells;
	}
	else
	{
		/* First cell of list */
@ -292,14 +290,12 @@ FastConc(FastList *fl, List *cells)
 * Note that the cells of the second argument are absorbed into the first.
 */
void
FastConcFast(FastList *fl, FastList *fl2)
FastConcFast(FastList * fl, FastList * fl2)
{
	if (fl2->head == NIL)
		return;					/* nothing to do */
	if (fl->tail)
	{
		lnext(fl->tail) = fl2->head;
	}
	else
	{
		/* First cell of list */
@ -319,9 +315,7 @@ nth(int n, List *l)
{
	/* XXX assume list is long enough */
	while (n-- > 0)
	{
		l = lnext(l);
	}
	return lfirst(l);
}

@ -781,4 +775,5 @@ lreverse(List *l)
		result = lcons(lfirst(i), result);
	return result;
}

#endif
Some files were not shown because too many files have changed in this diff.