mirror of
https://github.com/postgres/postgres.git
synced 2025-11-12 05:01:15 +03:00
Pgindent run for 8.0.
This commit is contained in:
@@ -9,7 +9,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.93 2004/08/29 04:12:17 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.94 2004/08/29 05:06:39 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* The old interface functions have been converted to macros
|
||||
@@ -468,17 +468,19 @@ heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
|
||||
break;
|
||||
|
||||
/*
|
||||
* If the attribute number is 0, then we are supposed to return
|
||||
* the entire tuple as a row-type Datum. (Using zero for this
|
||||
* purpose is unclean since it risks confusion with "invalid attr"
|
||||
* result codes, but it's not worth changing now.)
|
||||
* If the attribute number is 0, then we are supposed to
|
||||
* return the entire tuple as a row-type Datum. (Using zero
|
||||
* for this purpose is unclean since it risks confusion with
|
||||
* "invalid attr" result codes, but it's not worth changing
|
||||
* now.)
|
||||
*
|
||||
* We have to make a copy of the tuple so we can safely insert the
|
||||
* Datum overhead fields, which are not set in on-disk tuples.
|
||||
* We have to make a copy of the tuple so we can safely insert
|
||||
* the Datum overhead fields, which are not set in on-disk
|
||||
* tuples.
|
||||
*/
|
||||
case InvalidAttrNumber:
|
||||
{
|
||||
HeapTupleHeader dtup;
|
||||
HeapTupleHeader dtup;
|
||||
|
||||
dtup = (HeapTupleHeader) palloc(tup->t_len);
|
||||
memcpy((char *) dtup, (char *) tup->t_data, tup->t_len);
|
||||
@@ -555,7 +557,7 @@ heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest)
|
||||
* construct a tuple from the given values[] and nulls[] arrays
|
||||
*
|
||||
* Null attributes are indicated by a 'n' in the appropriate byte
|
||||
* of nulls[]. Non-null attributes are indicated by a ' ' (space).
|
||||
* of nulls[]. Non-null attributes are indicated by a ' ' (space).
|
||||
* ----------------
|
||||
*/
|
||||
HeapTuple
|
||||
@@ -580,7 +582,7 @@ heap_formtuple(TupleDesc tupleDescriptor,
|
||||
|
||||
/*
|
||||
* Check for nulls and embedded tuples; expand any toasted attributes
|
||||
* in embedded tuples. This preserves the invariant that toasting can
|
||||
* in embedded tuples. This preserves the invariant that toasting can
|
||||
* only go one level deep.
|
||||
*
|
||||
* We can skip calling toast_flatten_tuple_attribute() if the attribute
|
||||
@@ -620,7 +622,7 @@ heap_formtuple(TupleDesc tupleDescriptor,
|
||||
len += ComputeDataSize(tupleDescriptor, values, nulls);
|
||||
|
||||
/*
|
||||
* Allocate and zero the space needed. Note that the tuple body and
|
||||
* Allocate and zero the space needed. Note that the tuple body and
|
||||
* HeapTupleData management structure are allocated in one chunk.
|
||||
*/
|
||||
tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len);
|
||||
@@ -683,9 +685,9 @@ heap_modifytuple(HeapTuple tuple,
|
||||
* allocate and fill values and nulls arrays from either the tuple or
|
||||
* the repl information, as appropriate.
|
||||
*
|
||||
* NOTE: it's debatable whether to use heap_deformtuple() here or
|
||||
* just heap_getattr() only the non-replaced colums. The latter could
|
||||
* win if there are many replaced columns and few non-replaced ones.
|
||||
* NOTE: it's debatable whether to use heap_deformtuple() here or just
|
||||
* heap_getattr() only the non-replaced colums. The latter could win
|
||||
* if there are many replaced columns and few non-replaced ones.
|
||||
* However, heap_deformtuple costs only O(N) while the heap_getattr
|
||||
* way would cost O(N^2) if there are many non-replaced columns, so it
|
||||
* seems better to err on the side of linear cost.
|
||||
@@ -763,10 +765,11 @@ heap_deformtuple(HeapTuple tuple,
|
||||
bool slow = false; /* can we use/set attcacheoff? */
|
||||
|
||||
natts = tup->t_natts;
|
||||
|
||||
/*
|
||||
* In inheritance situations, it is possible that the given tuple actually
|
||||
* has more fields than the caller is expecting. Don't run off the end
|
||||
* of the caller's arrays.
|
||||
* In inheritance situations, it is possible that the given tuple
|
||||
* actually has more fields than the caller is expecting. Don't run
|
||||
* off the end of the caller's arrays.
|
||||
*/
|
||||
natts = Min(natts, tdesc_natts);
|
||||
|
||||
@@ -787,9 +790,7 @@ heap_deformtuple(HeapTuple tuple,
|
||||
nulls[attnum] = ' ';
|
||||
|
||||
if (!slow && att[attnum]->attcacheoff >= 0)
|
||||
{
|
||||
off = att[attnum]->attcacheoff;
|
||||
}
|
||||
else
|
||||
{
|
||||
off = att_align(off, att[attnum]->attalign);
|
||||
@@ -807,8 +808,8 @@ heap_deformtuple(HeapTuple tuple,
|
||||
}
|
||||
|
||||
/*
|
||||
* If tuple doesn't have all the atts indicated by tupleDesc, read
|
||||
* the rest as null
|
||||
* If tuple doesn't have all the atts indicated by tupleDesc, read the
|
||||
* rest as null
|
||||
*/
|
||||
for (; attnum < tdesc_natts; attnum++)
|
||||
{
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.70 2004/08/29 04:12:17 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.71 2004/08/29 05:06:39 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -162,9 +162,9 @@ index_formtuple(TupleDesc tupleDescriptor,
|
||||
if ((size & INDEX_SIZE_MASK) != size)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
|
||||
errmsg("index row requires %lu bytes, maximum size is %lu",
|
||||
(unsigned long) size,
|
||||
(unsigned long) INDEX_SIZE_MASK)));
|
||||
errmsg("index row requires %lu bytes, maximum size is %lu",
|
||||
(unsigned long) size,
|
||||
(unsigned long) INDEX_SIZE_MASK)));
|
||||
|
||||
infomask |= size;
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/access/common/printtup.c,v 1.84 2004/08/29 04:12:17 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/access/common/printtup.c,v 1.85 2004/08/29 05:06:39 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -356,7 +356,7 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
|
||||
|
||||
outputstr = DatumGetCString(FunctionCall3(&thisState->finfo,
|
||||
attr,
|
||||
ObjectIdGetDatum(thisState->typioparam),
|
||||
ObjectIdGetDatum(thisState->typioparam),
|
||||
Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
|
||||
pq_sendcountedtext(&buf, outputstr, strlen(outputstr), false);
|
||||
pfree(outputstr);
|
||||
@@ -368,7 +368,7 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
|
||||
|
||||
outputbytes = DatumGetByteaP(FunctionCall2(&thisState->finfo,
|
||||
attr,
|
||||
ObjectIdGetDatum(thisState->typioparam)));
|
||||
ObjectIdGetDatum(thisState->typioparam)));
|
||||
/* We assume the result will not have been toasted */
|
||||
pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4);
|
||||
pq_sendbytes(&buf, VARDATA(outputbytes),
|
||||
@@ -458,7 +458,7 @@ printtup_20(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
|
||||
|
||||
outputstr = DatumGetCString(FunctionCall3(&thisState->finfo,
|
||||
attr,
|
||||
ObjectIdGetDatum(thisState->typioparam),
|
||||
ObjectIdGetDatum(thisState->typioparam),
|
||||
Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
|
||||
pq_sendcountedtext(&buf, outputstr, strlen(outputstr), true);
|
||||
pfree(outputstr);
|
||||
@@ -579,7 +579,7 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
|
||||
|
||||
value = DatumGetCString(OidFunctionCall3(typoutput,
|
||||
attr,
|
||||
ObjectIdGetDatum(typioparam),
|
||||
ObjectIdGetDatum(typioparam),
|
||||
Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
|
||||
|
||||
printatt((unsigned) i + 1, typeinfo->attrs[i], value);
|
||||
@@ -672,7 +672,7 @@ printtup_internal_20(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
|
||||
|
||||
outputbytes = DatumGetByteaP(FunctionCall2(&thisState->finfo,
|
||||
attr,
|
||||
ObjectIdGetDatum(thisState->typioparam)));
|
||||
ObjectIdGetDatum(thisState->typioparam)));
|
||||
/* We assume the result will not have been toasted */
|
||||
pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4);
|
||||
pq_sendbytes(&buf, VARDATA(outputbytes),
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.105 2004/08/29 04:12:17 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.106 2004/08/29 05:06:39 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* some of the executor utility code such as "ExecTypeFromTL" should be
|
||||
@@ -52,8 +52,8 @@ CreateTemplateTupleDesc(int natts, bool hasoid)
|
||||
|
||||
/*
|
||||
* Allocate enough memory for the tuple descriptor, and zero the
|
||||
* attrs[] array since TupleDescInitEntry assumes that the array
|
||||
* is filled with NULL pointers.
|
||||
* attrs[] array since TupleDescInitEntry assumes that the array is
|
||||
* filled with NULL pointers.
|
||||
*/
|
||||
desc = (TupleDesc) palloc(sizeof(struct tupleDesc));
|
||||
|
||||
@@ -420,8 +420,8 @@ TupleDescInitEntry(TupleDesc desc,
|
||||
|
||||
/*
|
||||
* Note: attributeName can be NULL, because the planner doesn't always
|
||||
* fill in valid resname values in targetlists, particularly for resjunk
|
||||
* attributes.
|
||||
* fill in valid resname values in targetlists, particularly for
|
||||
* resjunk attributes.
|
||||
*/
|
||||
if (attributeName != NULL)
|
||||
namestrcpy(&(att->attname), attributeName);
|
||||
@@ -464,7 +464,7 @@ TupleDescInitEntry(TupleDesc desc,
|
||||
* Given a relation schema (list of ColumnDef nodes), build a TupleDesc.
|
||||
*
|
||||
* Note: the default assumption is no OIDs; caller may modify the returned
|
||||
* TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in
|
||||
* TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in
|
||||
* later on.
|
||||
*/
|
||||
TupleDesc
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/access/gist/gist.c,v 1.110 2004/08/29 04:12:17 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/access/gist/gist.c,v 1.111 2004/08/29 05:06:40 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -667,7 +667,7 @@ gistunion(Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate)
|
||||
Datum attr[INDEX_MAX_KEYS];
|
||||
bool whatfree[INDEX_MAX_KEYS];
|
||||
char isnull[INDEX_MAX_KEYS];
|
||||
GistEntryVector *evec;
|
||||
GistEntryVector *evec;
|
||||
Datum datum;
|
||||
int datumsize,
|
||||
i,
|
||||
@@ -715,8 +715,8 @@ gistunion(Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate)
|
||||
{
|
||||
evec->n = 2;
|
||||
gistentryinit(evec->vector[1],
|
||||
evec->vector[0].key, r, NULL,
|
||||
(OffsetNumber) 0, evec->vector[0].bytes, FALSE);
|
||||
evec->vector[0].key, r, NULL,
|
||||
(OffsetNumber) 0, evec->vector[0].bytes, FALSE);
|
||||
|
||||
}
|
||||
else
|
||||
@@ -763,7 +763,7 @@ gistunion(Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate)
|
||||
static IndexTuple
|
||||
gistgetadjusted(Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *giststate)
|
||||
{
|
||||
GistEntryVector *evec;
|
||||
GistEntryVector *evec;
|
||||
Datum datum;
|
||||
int datumsize;
|
||||
bool result,
|
||||
@@ -879,7 +879,7 @@ gistunionsubkey(Relation r, GISTSTATE *giststate, IndexTuple *itvec, GIST_SPLITV
|
||||
int len,
|
||||
*attrsize;
|
||||
OffsetNumber *entries;
|
||||
GistEntryVector *evec;
|
||||
GistEntryVector *evec;
|
||||
Datum datum;
|
||||
int datumsize;
|
||||
int reallen;
|
||||
@@ -940,8 +940,8 @@ gistunionsubkey(Relation r, GISTSTATE *giststate, IndexTuple *itvec, GIST_SPLITV
|
||||
else
|
||||
{
|
||||
/*
|
||||
* evec->vector[0].bytes may be not
|
||||
* defined, so form union with itself
|
||||
* evec->vector[0].bytes may be not defined, so form union
|
||||
* with itself
|
||||
*/
|
||||
if (reallen == 1)
|
||||
{
|
||||
@@ -1056,7 +1056,7 @@ gistadjsubkey(Relation r,
|
||||
*ev1p;
|
||||
float lpenalty,
|
||||
rpenalty;
|
||||
GistEntryVector *evec;
|
||||
GistEntryVector *evec;
|
||||
int datumsize;
|
||||
bool isnull[INDEX_MAX_KEYS];
|
||||
int i,
|
||||
@@ -1222,7 +1222,7 @@ gistSplit(Relation r,
|
||||
rbknum;
|
||||
GISTPageOpaque opaque;
|
||||
GIST_SPLITVEC v;
|
||||
GistEntryVector *entryvec;
|
||||
GistEntryVector *entryvec;
|
||||
bool *decompvec;
|
||||
int i,
|
||||
j,
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.41 2004/08/29 04:12:17 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.42 2004/08/29 05:06:40 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -250,9 +250,10 @@ gistindex_keytest(IndexTuple tuple,
|
||||
FALSE, isNull);
|
||||
|
||||
/*
|
||||
* Call the Consistent function to evaluate the test. The arguments
|
||||
* are the index datum (as a GISTENTRY*), the comparison datum, and
|
||||
* the comparison operator's strategy number and subtype from pg_amop.
|
||||
* Call the Consistent function to evaluate the test. The
|
||||
* arguments are the index datum (as a GISTENTRY*), the comparison
|
||||
* datum, and the comparison operator's strategy number and
|
||||
* subtype from pg_amop.
|
||||
*
|
||||
* (Presently there's no need to pass the subtype since it'll always
|
||||
* be zero, but might as well pass it for possible future use.)
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/access/gist/gistscan.c,v 1.54 2004/08/29 04:12:17 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/access/gist/gistscan.c,v 1.55 2004/08/29 05:06:40 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -115,9 +115,7 @@ gistrescan(PG_FUNCTION_ARGS)
|
||||
* the sk_subtype field.
|
||||
*/
|
||||
for (i = 0; i < s->numberOfKeys; i++)
|
||||
{
|
||||
s->keyData[i].sk_func = p->giststate->consistentFn[s->keyData[i].sk_attno - 1];
|
||||
}
|
||||
}
|
||||
|
||||
PG_RETURN_VOID();
|
||||
@@ -266,9 +264,9 @@ ReleaseResources_gist(void)
|
||||
GISTScanList next;
|
||||
|
||||
/*
|
||||
* Note: this should be a no-op during normal query shutdown.
|
||||
* However, in an abort situation ExecutorEnd is not called and so
|
||||
* there may be open index scans to clean up.
|
||||
* Note: this should be a no-op during normal query shutdown. However,
|
||||
* in an abort situation ExecutorEnd is not called and so there may be
|
||||
* open index scans to clean up.
|
||||
*/
|
||||
prev = NULL;
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.72 2004/08/29 04:12:17 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.73 2004/08/29 05:06:40 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* This file contains only the public interface routines.
|
||||
@@ -210,8 +210,8 @@ hashgettuple(PG_FUNCTION_ARGS)
|
||||
bool res;
|
||||
|
||||
/*
|
||||
* We hold pin but not lock on current buffer while outside the hash AM.
|
||||
* Reacquire the read lock here.
|
||||
* We hold pin but not lock on current buffer while outside the hash
|
||||
* AM. Reacquire the read lock here.
|
||||
*/
|
||||
if (BufferIsValid(so->hashso_curbuf))
|
||||
_hash_chgbufaccess(rel, so->hashso_curbuf, HASH_NOLOCK, HASH_READ);
|
||||
@@ -470,7 +470,7 @@ hashbulkdelete(PG_FUNCTION_ARGS)
|
||||
/*
|
||||
* Read the metapage to fetch original bucket and tuple counts. Also,
|
||||
* we keep a copy of the last-seen metapage so that we can use its
|
||||
* hashm_spares[] values to compute bucket page addresses. This is a
|
||||
* hashm_spares[] values to compute bucket page addresses. This is a
|
||||
* bit hokey but perfectly safe, since the interesting entries in the
|
||||
* spares array cannot change under us; and it beats rereading the
|
||||
* metapage for each bucket.
|
||||
@@ -532,7 +532,7 @@ loop_top:
|
||||
ItemPointer htup;
|
||||
|
||||
hitem = (HashItem) PageGetItem(page,
|
||||
PageGetItemId(page, offno));
|
||||
PageGetItemId(page, offno));
|
||||
htup = &(hitem->hash_itup.t_tid);
|
||||
if (callback(htup, callback_state))
|
||||
{
|
||||
@@ -595,8 +595,8 @@ loop_top:
|
||||
orig_ntuples == metap->hashm_ntuples)
|
||||
{
|
||||
/*
|
||||
* No one has split or inserted anything since start of scan,
|
||||
* so believe our count as gospel.
|
||||
* No one has split or inserted anything since start of scan, so
|
||||
* believe our count as gospel.
|
||||
*/
|
||||
metap->hashm_ntuples = num_index_tuples;
|
||||
}
|
||||
@@ -604,7 +604,7 @@ loop_top:
|
||||
{
|
||||
/*
|
||||
* Otherwise, our count is untrustworthy since we may have
|
||||
* double-scanned tuples in split buckets. Proceed by
|
||||
* double-scanned tuples in split buckets. Proceed by
|
||||
* dead-reckoning.
|
||||
*/
|
||||
if (metap->hashm_ntuples > tuples_removed)
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.33 2004/08/29 04:12:18 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.34 2004/08/29 05:06:40 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -20,7 +20,7 @@
|
||||
|
||||
|
||||
static OffsetNumber _hash_pgaddtup(Relation rel, Buffer buf,
|
||||
Size itemsize, HashItem hitem);
|
||||
Size itemsize, HashItem hitem);
|
||||
|
||||
|
||||
/*
|
||||
@@ -81,7 +81,7 @@ _hash_doinsert(Relation rel, HashItem hitem)
|
||||
|
||||
/*
|
||||
* Check whether the item can fit on a hash page at all. (Eventually,
|
||||
* we ought to try to apply TOAST methods if not.) Note that at this
|
||||
* we ought to try to apply TOAST methods if not.) Note that at this
|
||||
* point, itemsz doesn't include the ItemId.
|
||||
*/
|
||||
if (itemsz > HashMaxItemSize((Page) metap))
|
||||
@@ -105,7 +105,8 @@ _hash_doinsert(Relation rel, HashItem hitem)
|
||||
_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
|
||||
|
||||
/*
|
||||
* Acquire share lock on target bucket; then we can release split lock.
|
||||
* Acquire share lock on target bucket; then we can release split
|
||||
* lock.
|
||||
*/
|
||||
_hash_getlock(rel, blkno, HASH_SHARE);
|
||||
|
||||
@@ -124,7 +125,7 @@ _hash_doinsert(Relation rel, HashItem hitem)
|
||||
/*
|
||||
* no space on this page; check for an overflow page
|
||||
*/
|
||||
BlockNumber nextblkno = pageopaque->hasho_nextblkno;
|
||||
BlockNumber nextblkno = pageopaque->hasho_nextblkno;
|
||||
|
||||
if (BlockNumberIsValid(nextblkno))
|
||||
{
|
||||
@@ -169,8 +170,8 @@ _hash_doinsert(Relation rel, HashItem hitem)
|
||||
_hash_droplock(rel, blkno, HASH_SHARE);
|
||||
|
||||
/*
|
||||
* Write-lock the metapage so we can increment the tuple count.
|
||||
* After incrementing it, check to see if it's time for a split.
|
||||
* Write-lock the metapage so we can increment the tuple count. After
|
||||
* incrementing it, check to see if it's time for a split.
|
||||
*/
|
||||
_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.43 2004/08/29 04:12:18 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.44 2004/08/29 05:06:40 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* Overflow pages look like ordinary relation pages.
|
||||
@@ -41,11 +41,11 @@ bitno_to_blkno(HashMetaPage metap, uint32 ovflbitnum)
|
||||
for (i = 1;
|
||||
i < splitnum && ovflbitnum > metap->hashm_spares[i];
|
||||
i++)
|
||||
/* loop */ ;
|
||||
/* loop */ ;
|
||||
|
||||
/*
|
||||
* Convert to absolute page number by adding the number of bucket pages
|
||||
* that exist before this split point.
|
||||
* Convert to absolute page number by adding the number of bucket
|
||||
* pages that exist before this split point.
|
||||
*/
|
||||
return (BlockNumber) ((1 << i) + ovflbitnum);
|
||||
}
|
||||
@@ -79,7 +79,7 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
|
||||
*
|
||||
* Add an overflow page to the bucket whose last page is pointed to by 'buf'.
|
||||
*
|
||||
* On entry, the caller must hold a pin but no lock on 'buf'. The pin is
|
||||
* On entry, the caller must hold a pin but no lock on 'buf'. The pin is
|
||||
* dropped before exiting (we assume the caller is not interested in 'buf'
|
||||
* anymore). The returned overflow page will be pinned and write-locked;
|
||||
* it is guaranteed to be empty.
|
||||
@@ -88,12 +88,12 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
|
||||
* That buffer is returned in the same state.
|
||||
*
|
||||
* The caller must hold at least share lock on the bucket, to ensure that
|
||||
* no one else tries to compact the bucket meanwhile. This guarantees that
|
||||
* no one else tries to compact the bucket meanwhile. This guarantees that
|
||||
* 'buf' won't stop being part of the bucket while it's unlocked.
|
||||
*
|
||||
* NB: since this could be executed concurrently by multiple processes,
|
||||
* one should not assume that the returned overflow page will be the
|
||||
* immediate successor of the originally passed 'buf'. Additional overflow
|
||||
* immediate successor of the originally passed 'buf'. Additional overflow
|
||||
* pages might have been added to the bucket chain in between.
|
||||
*/
|
||||
Buffer
|
||||
@@ -197,7 +197,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
|
||||
/* outer loop iterates once per bitmap page */
|
||||
for (;;)
|
||||
{
|
||||
BlockNumber mapblkno;
|
||||
BlockNumber mapblkno;
|
||||
Page mappage;
|
||||
uint32 last_inpage;
|
||||
|
||||
@@ -274,9 +274,9 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
|
||||
blkno = bitno_to_blkno(metap, bit);
|
||||
|
||||
/*
|
||||
* Adjust hashm_firstfree to avoid redundant searches. But don't
|
||||
* risk changing it if someone moved it while we were searching
|
||||
* bitmap pages.
|
||||
* Adjust hashm_firstfree to avoid redundant searches. But don't risk
|
||||
* changing it if someone moved it while we were searching bitmap
|
||||
* pages.
|
||||
*/
|
||||
if (metap->hashm_firstfree == orig_firstfree)
|
||||
metap->hashm_firstfree = bit + 1;
|
||||
@@ -304,9 +304,9 @@ found:
|
||||
blkno = bitno_to_blkno(metap, bit);
|
||||
|
||||
/*
|
||||
* Adjust hashm_firstfree to avoid redundant searches. But don't
|
||||
* risk changing it if someone moved it while we were searching
|
||||
* bitmap pages.
|
||||
* Adjust hashm_firstfree to avoid redundant searches. But don't risk
|
||||
* changing it if someone moved it while we were searching bitmap
|
||||
* pages.
|
||||
*/
|
||||
if (metap->hashm_firstfree == orig_firstfree)
|
||||
{
|
||||
@@ -381,7 +381,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
|
||||
Bucket bucket;
|
||||
|
||||
/* Get information from the doomed page */
|
||||
ovflblkno = BufferGetBlockNumber(ovflbuf);
|
||||
ovflblkno = BufferGetBlockNumber(ovflbuf);
|
||||
ovflpage = BufferGetPage(ovflbuf);
|
||||
_hash_checkpage(rel, ovflpage, LH_OVERFLOW_PAGE);
|
||||
ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage);
|
||||
@@ -396,7 +396,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
|
||||
/*
|
||||
* Fix up the bucket chain. this is a doubly-linked list, so we must
|
||||
* fix up the bucket chain members behind and ahead of the overflow
|
||||
* page being deleted. No concurrency issues since we hold exclusive
|
||||
* page being deleted. No concurrency issues since we hold exclusive
|
||||
* lock on the entire bucket.
|
||||
*/
|
||||
if (BlockNumberIsValid(prevblkno))
|
||||
@@ -488,7 +488,8 @@ _hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno)
|
||||
|
||||
/*
|
||||
* It is okay to write-lock the new bitmap page while holding metapage
|
||||
* write lock, because no one else could be contending for the new page.
|
||||
* write lock, because no one else could be contending for the new
|
||||
* page.
|
||||
*
|
||||
* There is some loss of concurrency in possibly doing I/O for the new
|
||||
* page while holding the metapage lock, but this path is taken so
|
||||
@@ -654,8 +655,8 @@ _hash_squeezebucket(Relation rel,
|
||||
|
||||
/*
|
||||
* delete the tuple from the "read" page. PageIndexTupleDelete
|
||||
* repacks the ItemId array, so 'roffnum' will be "advanced" to
|
||||
* the "next" ItemId.
|
||||
* repacks the ItemId array, so 'roffnum' will be "advanced"
|
||||
* to the "next" ItemId.
|
||||
*/
|
||||
PageIndexTupleDelete(rpage, roffnum);
|
||||
}
|
||||
@@ -667,8 +668,9 @@ _hash_squeezebucket(Relation rel,
|
||||
* Tricky point here: if our read and write pages are adjacent in the
|
||||
* bucket chain, our write lock on wbuf will conflict with
|
||||
* _hash_freeovflpage's attempt to update the sibling links of the
|
||||
* removed page. However, in that case we are done anyway, so we can
|
||||
* simply drop the write lock before calling _hash_freeovflpage.
|
||||
* removed page. However, in that case we are done anyway, so we
|
||||
* can simply drop the write lock before calling
|
||||
* _hash_freeovflpage.
|
||||
*/
|
||||
if (PageIsEmpty(rpage))
|
||||
{
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.45 2004/08/29 04:12:18 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.46 2004/08/29 05:06:40 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* Postgres hash pages look like ordinary relation pages. The opaque
|
||||
@@ -35,11 +35,11 @@
|
||||
|
||||
|
||||
static void _hash_splitbucket(Relation rel, Buffer metabuf,
|
||||
Bucket obucket, Bucket nbucket,
|
||||
BlockNumber start_oblkno,
|
||||
BlockNumber start_nblkno,
|
||||
uint32 maxbucket,
|
||||
uint32 highmask, uint32 lowmask);
|
||||
Bucket obucket, Bucket nbucket,
|
||||
BlockNumber start_oblkno,
|
||||
BlockNumber start_nblkno,
|
||||
uint32 maxbucket,
|
||||
uint32 highmask, uint32 lowmask);
|
||||
|
||||
|
||||
/*
|
||||
@@ -47,7 +47,7 @@ static void _hash_splitbucket(Relation rel, Buffer metabuf,
|
||||
* of the locking rules). However, we can skip taking lmgr locks when the
|
||||
* index is local to the current backend (ie, either temp or new in the
|
||||
* current transaction). No one else can see it, so there's no reason to
|
||||
* take locks. We still take buffer-level locks, but not lmgr locks.
|
||||
* take locks. We still take buffer-level locks, but not lmgr locks.
|
||||
*/
|
||||
#define USELOCKING(rel) (!RELATION_IS_LOCAL(rel))
|
||||
|
||||
@@ -239,13 +239,13 @@ _hash_metapinit(Relation rel)
|
||||
RelationGetRelationName(rel));
|
||||
|
||||
/*
|
||||
* Determine the target fill factor (tuples per bucket) for this index.
|
||||
* The idea is to make the fill factor correspond to pages about 3/4ths
|
||||
* full. We can compute it exactly if the index datatype is fixed-width,
|
||||
* but for var-width there's some guessing involved.
|
||||
* Determine the target fill factor (tuples per bucket) for this
|
||||
* index. The idea is to make the fill factor correspond to pages
|
||||
* about 3/4ths full. We can compute it exactly if the index datatype
|
||||
* is fixed-width, but for var-width there's some guessing involved.
|
||||
*/
|
||||
data_width = get_typavgwidth(RelationGetDescr(rel)->attrs[0]->atttypid,
|
||||
RelationGetDescr(rel)->attrs[0]->atttypmod);
|
||||
RelationGetDescr(rel)->attrs[0]->atttypmod);
|
||||
item_width = MAXALIGN(sizeof(HashItemData)) + MAXALIGN(data_width) +
|
||||
sizeof(ItemIdData); /* include the line pointer */
|
||||
ffactor = (BLCKSZ * 3 / 4) / item_width;
|
||||
@@ -288,8 +288,9 @@ _hash_metapinit(Relation rel)
|
||||
metap->hashm_procid = index_getprocid(rel, 1, HASHPROC);
|
||||
|
||||
/*
|
||||
* We initialize the index with two buckets, 0 and 1, occupying physical
|
||||
* blocks 1 and 2. The first freespace bitmap page is in block 3.
|
||||
* We initialize the index with two buckets, 0 and 1, occupying
|
||||
* physical blocks 1 and 2. The first freespace bitmap page is in
|
||||
* block 3.
|
||||
*/
|
||||
metap->hashm_maxbucket = metap->hashm_lowmask = 1; /* nbuckets - 1 */
|
||||
metap->hashm_highmask = 3; /* (nbuckets << 1) - 1 */
|
||||
@@ -297,7 +298,7 @@ _hash_metapinit(Relation rel)
|
||||
MemSet((char *) metap->hashm_spares, 0, sizeof(metap->hashm_spares));
|
||||
MemSet((char *) metap->hashm_mapp, 0, sizeof(metap->hashm_mapp));
|
||||
|
||||
metap->hashm_spares[1] = 1; /* the first bitmap page is only spare */
|
||||
metap->hashm_spares[1] = 1; /* the first bitmap page is only spare */
|
||||
metap->hashm_ovflpoint = 1;
|
||||
metap->hashm_firstfree = 0;
|
||||
|
||||
@@ -319,8 +320,8 @@ _hash_metapinit(Relation rel)
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize first bitmap page. Can't do this until we
|
||||
* create the first two buckets, else smgr will complain.
|
||||
* Initialize first bitmap page. Can't do this until we create the
|
||||
* first two buckets, else smgr will complain.
|
||||
*/
|
||||
_hash_initbitmap(rel, metap, 3);
|
||||
|
||||
@@ -362,17 +363,18 @@ _hash_expandtable(Relation rel, Buffer metabuf)
|
||||
uint32 lowmask;
|
||||
|
||||
/*
|
||||
* Obtain the page-zero lock to assert the right to begin a split
|
||||
* (see README).
|
||||
* Obtain the page-zero lock to assert the right to begin a split (see
|
||||
* README).
|
||||
*
|
||||
* Note: deadlock should be impossible here. Our own backend could only
|
||||
* be holding bucket sharelocks due to stopped indexscans; those will not
|
||||
* block other holders of the page-zero lock, who are only interested in
|
||||
* acquiring bucket sharelocks themselves. Exclusive bucket locks are
|
||||
* only taken here and in hashbulkdelete, and neither of these operations
|
||||
* needs any additional locks to complete. (If, due to some flaw in this
|
||||
* reasoning, we manage to deadlock anyway, it's okay to error out; the
|
||||
* index will be left in a consistent state.)
|
||||
* be holding bucket sharelocks due to stopped indexscans; those will
|
||||
* not block other holders of the page-zero lock, who are only
|
||||
* interested in acquiring bucket sharelocks themselves. Exclusive
|
||||
* bucket locks are only taken here and in hashbulkdelete, and neither
|
||||
* of these operations needs any additional locks to complete. (If,
|
||||
* due to some flaw in this reasoning, we manage to deadlock anyway,
|
||||
* it's okay to error out; the index will be left in a consistent
|
||||
* state.)
|
||||
*/
|
||||
_hash_getlock(rel, 0, HASH_EXCLUSIVE);
|
||||
|
||||
@@ -383,8 +385,8 @@ _hash_expandtable(Relation rel, Buffer metabuf)
|
||||
_hash_checkpage(rel, (Page) metap, LH_META_PAGE);
|
||||
|
||||
/*
|
||||
* Check to see if split is still needed; someone else might have already
|
||||
* done one while we waited for the lock.
|
||||
* Check to see if split is still needed; someone else might have
|
||||
* already done one while we waited for the lock.
|
||||
*
|
||||
* Make sure this stays in sync with_hash_doinsert()
|
||||
*/
|
||||
@@ -394,16 +396,16 @@ _hash_expandtable(Relation rel, Buffer metabuf)
|
||||
|
||||
/*
|
||||
* Determine which bucket is to be split, and attempt to lock the old
|
||||
* bucket. If we can't get the lock, give up.
|
||||
* bucket. If we can't get the lock, give up.
|
||||
*
|
||||
* The lock protects us against other backends, but not against our own
|
||||
* backend. Must check for active scans separately.
|
||||
*
|
||||
* Ideally we would lock the new bucket too before proceeding, but if
|
||||
* we are about to cross a splitpoint then the BUCKET_TO_BLKNO mapping
|
||||
* Ideally we would lock the new bucket too before proceeding, but if we
|
||||
* are about to cross a splitpoint then the BUCKET_TO_BLKNO mapping
|
||||
* isn't correct yet. For simplicity we update the metapage first and
|
||||
* then lock. This should be okay because no one else should be trying
|
||||
* to lock the new bucket yet...
|
||||
* then lock. This should be okay because no one else should be
|
||||
* trying to lock the new bucket yet...
|
||||
*/
|
||||
new_bucket = metap->hashm_maxbucket + 1;
|
||||
old_bucket = (new_bucket & metap->hashm_lowmask);
|
||||
@@ -417,7 +419,8 @@ _hash_expandtable(Relation rel, Buffer metabuf)
|
||||
goto fail;
|
||||
|
||||
/*
|
||||
* Okay to proceed with split. Update the metapage bucket mapping info.
|
||||
* Okay to proceed with split. Update the metapage bucket mapping
|
||||
* info.
|
||||
*/
|
||||
metap->hashm_maxbucket = new_bucket;
|
||||
|
||||
@@ -431,11 +434,11 @@ _hash_expandtable(Relation rel, Buffer metabuf)
|
||||
/*
|
||||
* If the split point is increasing (hashm_maxbucket's log base 2
|
||||
* increases), we need to adjust the hashm_spares[] array and
|
||||
* hashm_ovflpoint so that future overflow pages will be created beyond
|
||||
* this new batch of bucket pages.
|
||||
* hashm_ovflpoint so that future overflow pages will be created
|
||||
* beyond this new batch of bucket pages.
|
||||
*
|
||||
* XXX should initialize new bucket pages to prevent out-of-order
|
||||
* page creation? Don't wanna do it right here though.
|
||||
* XXX should initialize new bucket pages to prevent out-of-order page
|
||||
* creation? Don't wanna do it right here though.
|
||||
*/
|
||||
spare_ndx = _hash_log2(metap->hashm_maxbucket + 1);
|
||||
if (spare_ndx > metap->hashm_ovflpoint)
|
||||
@@ -456,9 +459,10 @@ _hash_expandtable(Relation rel, Buffer metabuf)
|
||||
/*
|
||||
* Copy bucket mapping info now; this saves re-accessing the meta page
|
||||
* inside _hash_splitbucket's inner loop. Note that once we drop the
|
||||
* split lock, other splits could begin, so these values might be out of
|
||||
* date before _hash_splitbucket finishes. That's okay, since all it
|
||||
* needs is to tell which of these two buckets to map hashkeys into.
|
||||
* split lock, other splits could begin, so these values might be out
|
||||
* of date before _hash_splitbucket finishes. That's okay, since all
|
||||
* it needs is to tell which of these two buckets to map hashkeys
|
||||
* into.
|
||||
*/
|
||||
maxbucket = metap->hashm_maxbucket;
|
||||
highmask = metap->hashm_highmask;
|
||||
@@ -539,8 +543,8 @@ _hash_splitbucket(Relation rel,
|
||||
|
||||
/*
|
||||
* It should be okay to simultaneously write-lock pages from each
|
||||
* bucket, since no one else can be trying to acquire buffer lock
|
||||
* on pages of either bucket.
|
||||
* bucket, since no one else can be trying to acquire buffer lock on
|
||||
* pages of either bucket.
|
||||
*/
|
||||
oblkno = start_oblkno;
|
||||
nblkno = start_nblkno;
|
||||
@@ -562,9 +566,9 @@ _hash_splitbucket(Relation rel,
|
||||
nopaque->hasho_filler = HASHO_FILL;
|
||||
|
||||
/*
|
||||
* Partition the tuples in the old bucket between the old bucket and the
|
||||
* new bucket, advancing along the old bucket's overflow bucket chain
|
||||
* and adding overflow pages to the new bucket as needed.
|
||||
* Partition the tuples in the old bucket between the old bucket and
|
||||
* the new bucket, advancing along the old bucket's overflow bucket
|
||||
* chain and adding overflow pages to the new bucket as needed.
|
||||
*/
|
||||
ooffnum = FirstOffsetNumber;
|
||||
omaxoffnum = PageGetMaxOffsetNumber(opage);
|
||||
@@ -582,9 +586,10 @@ _hash_splitbucket(Relation rel,
|
||||
oblkno = oopaque->hasho_nextblkno;
|
||||
if (!BlockNumberIsValid(oblkno))
|
||||
break;
|
||||
|
||||
/*
|
||||
* we ran out of tuples on this particular page, but we
|
||||
* have more overflow pages; advance to next page.
|
||||
* we ran out of tuples on this particular page, but we have
|
||||
* more overflow pages; advance to next page.
|
||||
*/
|
||||
_hash_wrtbuf(rel, obuf);
|
||||
|
||||
@@ -600,8 +605,8 @@ _hash_splitbucket(Relation rel,
|
||||
/*
|
||||
* Re-hash the tuple to determine which bucket it now belongs in.
|
||||
*
|
||||
* It is annoying to call the hash function while holding locks,
|
||||
* but releasing and relocking the page for each tuple is unappealing
|
||||
* It is annoying to call the hash function while holding locks, but
|
||||
* releasing and relocking the page for each tuple is unappealing
|
||||
* too.
|
||||
*/
|
||||
hitem = (HashItem) PageGetItem(opage, PageGetItemId(opage, ooffnum));
|
||||
@@ -666,10 +671,11 @@ _hash_splitbucket(Relation rel,
|
||||
}
|
||||
|
||||
/*
|
||||
* We're at the end of the old bucket chain, so we're done partitioning
|
||||
* the tuples. Before quitting, call _hash_squeezebucket to ensure the
|
||||
* tuples remaining in the old bucket (including the overflow pages) are
|
||||
* packed as tightly as possible. The new bucket is already tight.
|
||||
* We're at the end of the old bucket chain, so we're done
|
||||
* partitioning the tuples. Before quitting, call _hash_squeezebucket
|
||||
* to ensure the tuples remaining in the old bucket (including the
|
||||
* overflow pages) are packed as tightly as possible. The new bucket
|
||||
* is already tight.
|
||||
*/
|
||||
_hash_wrtbuf(rel, obuf);
|
||||
_hash_wrtbuf(rel, nbuf);
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/access/hash/hashscan.c,v 1.36 2004/08/29 04:12:18 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/access/hash/hashscan.c,v 1.37 2004/08/29 05:06:40 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -44,9 +44,9 @@ ReleaseResources_hash(void)
|
||||
HashScanList next;
|
||||
|
||||
/*
|
||||
* Note: this should be a no-op during normal query shutdown.
|
||||
* However, in an abort situation ExecutorEnd is not called and so
|
||||
* there may be open index scans to clean up.
|
||||
* Note: this should be a no-op during normal query shutdown. However,
|
||||
* in an abort situation ExecutorEnd is not called and so there may be
|
||||
* open index scans to clean up.
|
||||
*/
|
||||
prev = NULL;
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/access/hash/hashsearch.c,v 1.36 2004/08/29 04:12:18 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/access/hash/hashsearch.c,v 1.37 2004/08/29 05:06:40 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -137,12 +137,13 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
|
||||
* We do not support hash scans with no index qualification, because
|
||||
* we would have to read the whole index rather than just one bucket.
|
||||
* That creates a whole raft of problems, since we haven't got a
|
||||
* practical way to lock all the buckets against splits or compactions.
|
||||
* practical way to lock all the buckets against splits or
|
||||
* compactions.
|
||||
*/
|
||||
if (scan->numberOfKeys < 1)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("hash indexes do not support whole-index scans")));
|
||||
errmsg("hash indexes do not support whole-index scans")));
|
||||
|
||||
/*
|
||||
* If the constant in the index qual is NULL, assume it cannot match
|
||||
@@ -182,7 +183,8 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
|
||||
_hash_relbuf(rel, metabuf);
|
||||
|
||||
/*
|
||||
* Acquire share lock on target bucket; then we can release split lock.
|
||||
* Acquire share lock on target bucket; then we can release split
|
||||
* lock.
|
||||
*/
|
||||
_hash_getlock(rel, blkno, HASH_SHARE);
|
||||
|
||||
@@ -287,9 +289,8 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
|
||||
while (offnum > maxoff)
|
||||
{
|
||||
/*
|
||||
* either this page is empty
|
||||
* (maxoff == InvalidOffsetNumber)
|
||||
* or we ran off the end.
|
||||
* either this page is empty (maxoff ==
|
||||
* InvalidOffsetNumber) or we ran off the end.
|
||||
*/
|
||||
_hash_readnext(rel, &buf, &page, &opaque);
|
||||
if (BufferIsValid(buf))
|
||||
@@ -315,15 +316,12 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
|
||||
while (offnum < FirstOffsetNumber)
|
||||
{
|
||||
/*
|
||||
* either this page is empty
|
||||
* (offnum == InvalidOffsetNumber)
|
||||
* or we ran off the end.
|
||||
* either this page is empty (offnum ==
|
||||
* InvalidOffsetNumber) or we ran off the end.
|
||||
*/
|
||||
_hash_readprev(rel, &buf, &page, &opaque);
|
||||
if (BufferIsValid(buf))
|
||||
{
|
||||
maxoff = offnum = PageGetMaxOffsetNumber(page);
|
||||
}
|
||||
else
|
||||
{
|
||||
/* end of bucket */
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/access/hash/hashutil.c,v 1.39 2004/08/29 04:12:18 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/access/hash/hashutil.c,v 1.40 2004/08/29 05:06:40 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -113,6 +113,7 @@ void
|
||||
_hash_checkpage(Relation rel, Page page, int flags)
|
||||
{
|
||||
Assert(page);
|
||||
|
||||
/*
|
||||
* When checking the metapage, always verify magic number and version.
|
||||
*/
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.172 2004/08/29 04:12:20 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.173 2004/08/29 05:06:40 momjian Exp $
|
||||
*
|
||||
*
|
||||
* INTERFACE ROUTINES
|
||||
@@ -75,9 +75,9 @@ initscan(HeapScanDesc scan, ScanKey key)
|
||||
/*
|
||||
* Determine the number of blocks we have to scan.
|
||||
*
|
||||
* It is sufficient to do this once at scan start, since any tuples
|
||||
* added while the scan is in progress will be invisible to my
|
||||
* transaction anyway...
|
||||
* It is sufficient to do this once at scan start, since any tuples added
|
||||
* while the scan is in progress will be invisible to my transaction
|
||||
* anyway...
|
||||
*/
|
||||
scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_rd);
|
||||
|
||||
@@ -1141,12 +1141,13 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid)
|
||||
tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
|
||||
HeapTupleHeaderSetXmin(tup->t_data, GetCurrentTransactionId());
|
||||
HeapTupleHeaderSetCmin(tup->t_data, cid);
|
||||
HeapTupleHeaderSetCmax(tup->t_data, 0); /* zero out Datum fields */
|
||||
HeapTupleHeaderSetCmax(tup->t_data, 0); /* zero out Datum fields */
|
||||
tup->t_tableOid = relation->rd_id;
|
||||
|
||||
/*
|
||||
* If the new tuple is too big for storage or contains already toasted
|
||||
* out-of-line attributes from some other relation, invoke the toaster.
|
||||
* out-of-line attributes from some other relation, invoke the
|
||||
* toaster.
|
||||
*/
|
||||
if (HeapTupleHasExternal(tup) ||
|
||||
(MAXALIGN(tup->t_len) > TOAST_TUPLE_THRESHOLD))
|
||||
@@ -1273,7 +1274,7 @@ simple_heap_insert(Relation relation, HeapTuple tup)
|
||||
*/
|
||||
int
|
||||
heap_delete(Relation relation, ItemPointer tid,
|
||||
ItemPointer ctid, CommandId cid, Snapshot crosscheck, bool wait)
|
||||
ItemPointer ctid, CommandId cid, Snapshot crosscheck, bool wait)
|
||||
{
|
||||
ItemId lp;
|
||||
HeapTupleData tp;
|
||||
@@ -1404,9 +1405,9 @@ l1:
|
||||
|
||||
/*
|
||||
* If the tuple has toasted out-of-line attributes, we need to delete
|
||||
* those items too. We have to do this before WriteBuffer because we need
|
||||
* to look at the contents of the tuple, but it's OK to release the
|
||||
* context lock on the buffer first.
|
||||
* those items too. We have to do this before WriteBuffer because we
|
||||
* need to look at the contents of the tuple, but it's OK to release
|
||||
* the context lock on the buffer first.
|
||||
*/
|
||||
if (HeapTupleHasExternal(&tp))
|
||||
heap_tuple_toast_attrs(relation, NULL, &tp);
|
||||
@@ -1443,7 +1444,7 @@ simple_heap_delete(Relation relation, ItemPointer tid)
|
||||
result = heap_delete(relation, tid,
|
||||
&ctid,
|
||||
GetCurrentCommandId(), SnapshotAny,
|
||||
true /* wait for commit */);
|
||||
true /* wait for commit */ );
|
||||
switch (result)
|
||||
{
|
||||
case HeapTupleSelfUpdated:
|
||||
@@ -1490,7 +1491,7 @@ simple_heap_delete(Relation relation, ItemPointer tid)
|
||||
*/
|
||||
int
|
||||
heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
|
||||
ItemPointer ctid, CommandId cid, Snapshot crosscheck, bool wait)
|
||||
ItemPointer ctid, CommandId cid, Snapshot crosscheck, bool wait)
|
||||
{
|
||||
ItemId lp;
|
||||
HeapTupleData oldtup;
|
||||
@@ -1804,7 +1805,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
|
||||
result = heap_update(relation, otid, tup,
|
||||
&ctid,
|
||||
GetCurrentCommandId(), SnapshotAny,
|
||||
true /* wait for commit */);
|
||||
true /* wait for commit */ );
|
||||
switch (result)
|
||||
{
|
||||
case HeapTupleSelfUpdated:
|
||||
@@ -2198,8 +2199,8 @@ heap_xlog_newpage(bool redo, XLogRecPtr lsn, XLogRecord *record)
|
||||
Page page;
|
||||
|
||||
/*
|
||||
* Note: the NEWPAGE log record is used for both heaps and indexes,
|
||||
* so do not do anything that assumes we are touching a heap.
|
||||
* Note: the NEWPAGE log record is used for both heaps and indexes, so
|
||||
* do not do anything that assumes we are touching a heap.
|
||||
*/
|
||||
|
||||
if (!redo || (record->xl_info & XLR_BKP_BLOCK_1))
|
||||
@@ -2668,7 +2669,7 @@ static void
|
||||
out_target(char *buf, xl_heaptid *target)
|
||||
{
|
||||
sprintf(buf + strlen(buf), "rel %u/%u/%u; tid %u/%u",
|
||||
target->node.spcNode, target->node.dbNode, target->node.relNode,
|
||||
target->node.spcNode, target->node.dbNode, target->node.relNode,
|
||||
ItemPointerGetBlockNumber(&(target->tid)),
|
||||
ItemPointerGetOffsetNumber(&(target->tid)));
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.44 2004/08/29 04:12:20 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.45 2004/08/29 05:06:40 momjian Exp $
|
||||
*
|
||||
*
|
||||
* INTERFACE ROUTINES
|
||||
@@ -288,13 +288,13 @@ toast_delete(Relation rel, HeapTuple oldtup)
|
||||
/*
|
||||
* Get the tuple descriptor and break down the tuple into fields.
|
||||
*
|
||||
* NOTE: it's debatable whether to use heap_deformtuple() here or
|
||||
* just heap_getattr() only the varlena columns. The latter could
|
||||
* win if there are few varlena columns and many non-varlena ones.
|
||||
* However, heap_deformtuple costs only O(N) while the heap_getattr
|
||||
* way would cost O(N^2) if there are many varlena columns, so it
|
||||
* seems better to err on the side of linear cost. (We won't even
|
||||
* be here unless there's at least one varlena column, by the way.)
|
||||
* NOTE: it's debatable whether to use heap_deformtuple() here or just
|
||||
* heap_getattr() only the varlena columns. The latter could win if
|
||||
* there are few varlena columns and many non-varlena ones. However,
|
||||
* heap_deformtuple costs only O(N) while the heap_getattr way would
|
||||
* cost O(N^2) if there are many varlena columns, so it seems better
|
||||
* to err on the side of linear cost. (We won't even be here unless
|
||||
* there's at least one varlena column, by the way.)
|
||||
*/
|
||||
tupleDesc = rel->rd_att;
|
||||
att = tupleDesc->attrs;
|
||||
@@ -311,7 +311,7 @@ toast_delete(Relation rel, HeapTuple oldtup)
|
||||
{
|
||||
if (att[i]->attlen == -1)
|
||||
{
|
||||
Datum value = toast_values[i];
|
||||
Datum value = toast_values[i];
|
||||
|
||||
if (toast_nulls[i] != 'n' && VARATT_IS_EXTERNAL(value))
|
||||
toast_delete_datum(rel, value);
|
||||
@@ -791,7 +791,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
|
||||
*
|
||||
* If a Datum is of composite type, "flatten" it to contain no toasted fields.
|
||||
* This must be invoked on any potentially-composite field that is to be
|
||||
* inserted into a tuple. Doing this preserves the invariant that toasting
|
||||
* inserted into a tuple. Doing this preserves the invariant that toasting
|
||||
* goes only one level deep in a tuple.
|
||||
* ----------
|
||||
*/
|
||||
@@ -1105,7 +1105,7 @@ toast_delete_datum(Relation rel, Datum value)
|
||||
ScanKeyInit(&toastkey,
|
||||
(AttrNumber) 1,
|
||||
BTEqualStrategyNumber, F_OIDEQ,
|
||||
ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
|
||||
ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
|
||||
|
||||
/*
|
||||
* Find the chunks by index
|
||||
@@ -1176,7 +1176,7 @@ toast_fetch_datum(varattrib *attr)
|
||||
ScanKeyInit(&toastkey,
|
||||
(AttrNumber) 1,
|
||||
BTEqualStrategyNumber, F_OIDEQ,
|
||||
ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
|
||||
ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
|
||||
|
||||
/*
|
||||
* Read the chunks by index
|
||||
@@ -1330,7 +1330,7 @@ toast_fetch_datum_slice(varattrib *attr, int32 sliceoffset, int32 length)
|
||||
ScanKeyInit(&toastkey[0],
|
||||
(AttrNumber) 1,
|
||||
BTEqualStrategyNumber, F_OIDEQ,
|
||||
ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
|
||||
ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
|
||||
|
||||
/*
|
||||
* Use equality condition for one chunk, a range condition otherwise:
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.115 2004/08/29 04:12:21 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.116 2004/08/29 05:06:40 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -200,26 +200,26 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
|
||||
* We can skip items that are marked killed.
|
||||
*
|
||||
* Formerly, we applied _bt_isequal() before checking the kill
|
||||
* flag, so as to fall out of the item loop as soon as possible.
|
||||
* However, in the presence of heavy update activity an index
|
||||
* may contain many killed items with the same key; running
|
||||
* _bt_isequal() on each killed item gets expensive. Furthermore
|
||||
* it is likely that the non-killed version of each key appears
|
||||
* first, so that we didn't actually get to exit any sooner anyway.
|
||||
* So now we just advance over killed items as quickly as we can.
|
||||
* We only apply _bt_isequal() when we get to a non-killed item or
|
||||
* the end of the page.
|
||||
* flag, so as to fall out of the item loop as soon as
|
||||
* possible. However, in the presence of heavy update activity
|
||||
* an index may contain many killed items with the same key;
|
||||
* running _bt_isequal() on each killed item gets expensive.
|
||||
* Furthermore it is likely that the non-killed version of
|
||||
* each key appears first, so that we didn't actually get to
|
||||
* exit any sooner anyway. So now we just advance over killed
|
||||
* items as quickly as we can. We only apply _bt_isequal()
|
||||
* when we get to a non-killed item or the end of the page.
|
||||
*/
|
||||
if (!ItemIdDeleted(curitemid))
|
||||
{
|
||||
/*
|
||||
* _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's
|
||||
* how we handling NULLs - and so we must not use _bt_compare
|
||||
* in real comparison, but only for ordering/finding items on
|
||||
* pages. - vadim 03/24/97
|
||||
* _bt_compare returns 0 for (1,NULL) and (1,NULL) -
|
||||
* this's how we handling NULLs - and so we must not use
|
||||
* _bt_compare in real comparison, but only for
|
||||
* ordering/finding items on pages. - vadim 03/24/97
|
||||
*/
|
||||
if (!_bt_isequal(itupdesc, page, offset, natts, itup_scankey))
|
||||
break; /* we're past all the equal tuples */
|
||||
break; /* we're past all the equal tuples */
|
||||
|
||||
/* okay, we gotta fetch the heap tuple ... */
|
||||
cbti = (BTItem) PageGetItem(page, curitemid);
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.79 2004/08/29 04:12:21 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.80 2004/08/29 05:06:40 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* Postgres btree pages look like ordinary relation pages. The opaque
|
||||
@@ -276,8 +276,8 @@ _bt_getroot(Relation rel, int access)
|
||||
rootlevel = metad->btm_fastlevel;
|
||||
|
||||
/*
|
||||
* We are done with the metapage; arrange to release it via
|
||||
* first _bt_relandgetbuf call
|
||||
* We are done with the metapage; arrange to release it via first
|
||||
* _bt_relandgetbuf call
|
||||
*/
|
||||
rootbuf = metabuf;
|
||||
|
||||
@@ -368,8 +368,8 @@ _bt_gettrueroot(Relation rel)
|
||||
rootlevel = metad->btm_level;
|
||||
|
||||
/*
|
||||
* We are done with the metapage; arrange to release it via
|
||||
* first _bt_relandgetbuf call
|
||||
* We are done with the metapage; arrange to release it via first
|
||||
* _bt_relandgetbuf call
|
||||
*/
|
||||
rootbuf = metabuf;
|
||||
|
||||
@@ -433,21 +433,22 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
|
||||
* page could have been re-used between the time the last VACUUM
|
||||
* scanned it and the time the VACUUM made its FSM updates.)
|
||||
*
|
||||
* In fact, it's worse than that: we can't even assume that it's
|
||||
* safe to take a lock on the reported page. If somebody else
|
||||
* has a lock on it, or even worse our own caller does, we could
|
||||
* In fact, it's worse than that: we can't even assume that it's safe
|
||||
* to take a lock on the reported page. If somebody else has a
|
||||
* lock on it, or even worse our own caller does, we could
|
||||
* deadlock. (The own-caller scenario is actually not improbable.
|
||||
* Consider an index on a serial or timestamp column. Nearly all
|
||||
* splits will be at the rightmost page, so it's entirely likely
|
||||
* that _bt_split will call us while holding a lock on the page most
|
||||
* recently acquired from FSM. A VACUUM running concurrently with
|
||||
* the previous split could well have placed that page back in FSM.)
|
||||
* that _bt_split will call us while holding a lock on the page
|
||||
* most recently acquired from FSM. A VACUUM running concurrently
|
||||
* with the previous split could well have placed that page back
|
||||
* in FSM.)
|
||||
*
|
||||
* To get around that, we ask for only a conditional lock on the
|
||||
* reported page. If we fail, then someone else is using the page,
|
||||
* and we may reasonably assume it's not free. (If we happen to be
|
||||
* wrong, the worst consequence is the page will be lost to use till
|
||||
* the next VACUUM, which is no big problem.)
|
||||
* reported page. If we fail, then someone else is using the
|
||||
* page, and we may reasonably assume it's not free. (If we
|
||||
* happen to be wrong, the worst consequence is the page will be
|
||||
* lost to use till the next VACUUM, which is no big problem.)
|
||||
*/
|
||||
for (;;)
|
||||
{
|
||||
|
||||
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.88 2004/08/29 04:12:21 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.89 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -155,15 +155,16 @@ _bt_moveright(Relation rel,
opaque = (BTPageOpaque) PageGetSpecialPointer(page);

/*
* When nextkey = false (normal case): if the scan key that brought us to
* this page is > the high key stored on the page, then the page has split
* and we need to move right. (If the scan key is equal to the high key,
* we might or might not need to move right; have to scan the page first
* anyway.)
* When nextkey = false (normal case): if the scan key that brought us
* to this page is > the high key stored on the page, then the page
* has split and we need to move right. (If the scan key is equal to
* the high key, we might or might not need to move right; have to
* scan the page first anyway.)
*
* When nextkey = true: move right if the scan key is >= page's high key.
*
* The page could even have split more than once, so scan as far as needed.
* The page could even have split more than once, so scan as far as
* needed.
*
* We also have to move right if we followed a link that brought us to a
* dead page.
@@ -253,13 +254,11 @@ _bt_binsrch(Relation rel,
* Binary search to find the first key on the page >= scan key, or
* first key > scankey when nextkey is true.
*
* For nextkey=false (cmpval=1), the loop invariant is: all slots
* before 'low' are < scan key, all slots at or after 'high'
* are >= scan key.
* For nextkey=false (cmpval=1), the loop invariant is: all slots before
* 'low' are < scan key, all slots at or after 'high' are >= scan key.
*
* For nextkey=true (cmpval=0), the loop invariant is: all slots
* before 'low' are <= scan key, all slots at or after 'high'
* are > scan key.
* For nextkey=true (cmpval=0), the loop invariant is: all slots before
* 'low' are <= scan key, all slots at or after 'high' are > scan key.
*
* We can fall out when high == low.
*/
@@ -285,15 +284,15 @@ _bt_binsrch(Relation rel,
* At this point we have high == low, but be careful: they could point
* past the last slot on the page.
*
* On a leaf page, we always return the first key >= scan key (resp.
* > scan key), which could be the last slot + 1.
* On a leaf page, we always return the first key >= scan key (resp. >
* scan key), which could be the last slot + 1.
*/
if (P_ISLEAF(opaque))
return low;

/*
* On a non-leaf page, return the last key < scan key (resp. <= scan key).
* There must be one if _bt_compare() is playing by the rules.
* On a non-leaf page, return the last key < scan key (resp. <= scan
* key). There must be one if _bt_compare() is playing by the rules.
*/
Assert(low > P_FIRSTDATAKEY(opaque));

@@ -382,10 +381,10 @@ _bt_compare(Relation rel,
{
/*
* The sk_func needs to be passed the index value as left arg
* and the sk_argument as right arg (they might be of different
* types). Since it is convenient for callers to think of
* _bt_compare as comparing the scankey to the index item,
* we have to flip the sign of the comparison result.
* and the sk_argument as right arg (they might be of
* different types). Since it is convenient for callers to
* think of _bt_compare as comparing the scankey to the index
* item, we have to flip the sign of the comparison result.
*
* Note: curious-looking coding is to avoid overflow if
* comparison function returns INT_MIN. There is no risk of
@@ -497,7 +496,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
bool goback;
bool continuescan;
ScanKey scankeys;
ScanKey *startKeys = NULL;
ScanKey *startKeys = NULL;
int keysCount = 0;
int i;
StrategyNumber strat_total;
@@ -521,7 +520,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* We want to identify the keys that can be used as starting boundaries;
* these are =, >, or >= keys for a forward scan or =, <, <= keys for
* a backwards scan. We can use keys for multiple attributes so long as
* the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
* the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
* a > or < boundary or find an attribute with no boundary (which can be
* thought of as the same as "> -infinity"), we can't use keys for any
* attributes to its right, because it would break our simplistic notion
@@ -554,13 +553,15 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
ScanKey cur;

startKeys = (ScanKey *) palloc(so->numberOfKeys * sizeof(ScanKey));

/*
* chosen is the so-far-chosen key for the current attribute, if any.
* We don't cast the decision in stone until we reach keys for the
* next attribute.
* chosen is the so-far-chosen key for the current attribute, if
* any. We don't cast the decision in stone until we reach keys
* for the next attribute.
*/
curattr = 1;
chosen = NULL;

/*
* Loop iterates from 0 to numberOfKeys inclusive; we use the last
* pass to handle after-last-key processing. Actual exit from the
@@ -578,8 +579,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
if (chosen == NULL)
break;
startKeys[keysCount++] = chosen;

/*
* Adjust strat_total, and quit if we have stored a > or < key.
* Adjust strat_total, and quit if we have stored a > or <
* key.
*/
strat = chosen->sk_strategy;
if (strat != BTEqualStrategyNumber)
@@ -589,11 +592,13 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
strat == BTLessStrategyNumber)
break;
}

/*
* Done if that was the last attribute.
*/
if (i >= so->numberOfKeys)
break;

/*
* Reset for next attr, which should be in sequence.
*/
@@ -646,8 +651,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
ScanKey cur = startKeys[i];

/*
* _bt_preprocess_keys disallows it, but it's place to add some code
* later
* _bt_preprocess_keys disallows it, but it's place to add some
* code later
*/
if (cur->sk_flags & SK_ISNULL)
{
@@ -656,10 +661,11 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
elog(ERROR, "btree doesn't support is(not)null, yet");
return false;
}

/*
* If scankey operator is of default subtype, we can use the
* cached comparison procedure; otherwise gotta look it up in
* the catalogs.
* cached comparison procedure; otherwise gotta look it up in the
* catalogs.
*/
if (cur->sk_subtype == InvalidOid)
{
@@ -695,43 +701,46 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)

/*
* Examine the selected initial-positioning strategy to determine
* exactly where we need to start the scan, and set flag variables
* to control the code below.
* exactly where we need to start the scan, and set flag variables to
* control the code below.
*
* If nextkey = false, _bt_search and _bt_binsrch will locate the
* first item >= scan key. If nextkey = true, they will locate the
* first item > scan key.
* If nextkey = false, _bt_search and _bt_binsrch will locate the first
* item >= scan key. If nextkey = true, they will locate the first
* item > scan key.
*
* If goback = true, we will then step back one item, while if
* goback = false, we will start the scan on the located item.
* If goback = true, we will then step back one item, while if goback =
* false, we will start the scan on the located item.
*
* it's yet other place to add some code later for is(not)null ...
*/
switch (strat_total)
{
case BTLessStrategyNumber:

/*
* Find first item >= scankey, then back up one to arrive at last
* item < scankey. (Note: this positioning strategy is only used
* for a backward scan, so that is always the correct starting
* position.)
* Find first item >= scankey, then back up one to arrive at
* last item < scankey. (Note: this positioning strategy is
* only used for a backward scan, so that is always the
* correct starting position.)
*/
nextkey = false;
goback = true;
break;

case BTLessEqualStrategyNumber:

/*
* Find first item > scankey, then back up one to arrive at last
* item <= scankey. (Note: this positioning strategy is only used
* for a backward scan, so that is always the correct starting
* position.)
* Find first item > scankey, then back up one to arrive at
* last item <= scankey. (Note: this positioning strategy is
* only used for a backward scan, so that is always the
* correct starting position.)
*/
nextkey = true;
goback = true;
break;

case BTEqualStrategyNumber:

/*
* If a backward scan was specified, need to start with last
* equal item not first one.
@@ -739,8 +748,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
if (ScanDirectionIsBackward(dir))
{
/*
* This is the same as the <= strategy. We will check
* at the end whether the found item is actually =.
* This is the same as the <= strategy. We will check at
* the end whether the found item is actually =.
*/
nextkey = true;
goback = true;
@@ -748,8 +757,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
else
{
/*
* This is the same as the >= strategy. We will check
* at the end whether the found item is actually =.
* This is the same as the >= strategy. We will check at
* the end whether the found item is actually =.
*/
nextkey = false;
goback = false;
@@ -757,18 +766,20 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;

case BTGreaterEqualStrategyNumber:

/*
* Find first item >= scankey. (This is only used for
* forward scans.)
* Find first item >= scankey. (This is only used for forward
* scans.)
*/
nextkey = false;
goback = false;
break;

case BTGreaterStrategyNumber:

/*
* Find first item > scankey. (This is only used for
* forward scans.)
* Find first item > scankey. (This is only used for forward
* scans.)
*/
nextkey = true;
goback = false;
@@ -814,23 +825,23 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
pfree(scankeys);

/*
* If nextkey = false, we are positioned at the first item >= scan key,
* or possibly at the end of a page on which all the existing items are
* less than the scan key and we know that everything on later pages
* is greater than or equal to scan key.
* If nextkey = false, we are positioned at the first item >= scan
* key, or possibly at the end of a page on which all the existing
* items are less than the scan key and we know that everything on
* later pages is greater than or equal to scan key.
*
* If nextkey = true, we are positioned at the first item > scan key,
* or possibly at the end of a page on which all the existing items are
* If nextkey = true, we are positioned at the first item > scan key, or
* possibly at the end of a page on which all the existing items are
* less than or equal to the scan key and we know that everything on
* later pages is greater than scan key.
*
* The actually desired starting point is either this item or the prior
* one, or in the end-of-page case it's the first item on the next page
* or the last item on this page. We apply _bt_step if needed to get to
* the right place.
* one, or in the end-of-page case it's the first item on the next
* page or the last item on this page. We apply _bt_step if needed to
* get to the right place.
*
* If _bt_step fails (meaning we fell off the end of the index in
* one direction or the other), then there are no matches so we just
* If _bt_step fails (meaning we fell off the end of the index in one
* direction or the other), then there are no matches so we just
* return false.
*/
if (goback)
@@ -1292,7 +1303,8 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
itup = &(btitem->bti_itup);

/*
* Okay, we are on the first or last tuple. Does it pass all the quals?
* Okay, we are on the first or last tuple. Does it pass all the
* quals?
*/
if (_bt_checkkeys(scan, itup, dir, &continuescan))
{

@@ -41,11 +41,11 @@
*
* Since the index will never be used unless it is completely built,
* from a crash-recovery point of view there is no need to WAL-log the
* steps of the build. After completing the index build, we can just sync
* steps of the build. After completing the index build, we can just sync
* the whole file to disk using smgrimmedsync() before exiting this module.
* This can be seen to be sufficient for crash recovery by considering that
* it's effectively equivalent to what would happen if a CHECKPOINT occurred
* just after the index build. However, it is clearly not sufficient if the
* just after the index build. However, it is clearly not sufficient if the
* DBA is using the WAL log for PITR or replication purposes, since another
* machine would not be able to reconstruct the index from WAL. Therefore,
* we log the completed index pages to WAL if and only if WAL archiving is
@@ -56,7 +56,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.87 2004/08/29 04:12:21 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.88 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -98,7 +98,7 @@ struct BTSpool
typedef struct BTPageState
{
Page btps_page; /* workspace for page building */
BlockNumber btps_blkno; /* block # to write this page at */
BlockNumber btps_blkno; /* block # to write this page at */
BTItem btps_minkey; /* copy of minimum key (first item) on
* page */
OffsetNumber btps_lastoff; /* last item offset loaded */
@@ -114,10 +114,10 @@ typedef struct BTPageState
typedef struct BTWriteState
{
Relation index;
bool btws_use_wal; /* dump pages to WAL? */
BlockNumber btws_pages_alloced; /* # pages allocated */
BlockNumber btws_pages_written; /* # pages written out */
Page btws_zeropage; /* workspace for filling zeroes */
bool btws_use_wal; /* dump pages to WAL? */
BlockNumber btws_pages_alloced; /* # pages allocated */
BlockNumber btws_pages_written; /* # pages written out */
Page btws_zeropage; /* workspace for filling zeroes */
} BTWriteState;


@@ -136,7 +136,7 @@ static void _bt_sortaddtup(Page page, Size itemsize,
static void _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti);
static void _bt_uppershutdown(BTWriteState *wstate, BTPageState *state);
static void _bt_load(BTWriteState *wstate,
BTSpool *btspool, BTSpool *btspool2);
BTSpool *btspool, BTSpool *btspool2);


/*
@@ -157,12 +157,12 @@ _bt_spoolinit(Relation index, bool isunique, bool isdead)
btspool->isunique = isunique;

/*
* We size the sort area as maintenance_work_mem rather than work_mem to
* speed index creation. This should be OK since a single backend can't
* run multiple index creations in parallel. Note that creation of a
* unique index actually requires two BTSpool objects. We expect that the
* second one (for dead tuples) won't get very full, so we give it only
* work_mem.
* We size the sort area as maintenance_work_mem rather than work_mem
* to speed index creation. This should be OK since a single backend
* can't run multiple index creations in parallel. Note that creation
* of a unique index actually requires two BTSpool objects. We expect
* that the second one (for dead tuples) won't get very full, so we
* give it only work_mem.
*/
btKbytes = isdead ? work_mem : maintenance_work_mem;
btspool->sortstate = tuplesort_begin_index(index, isunique,
@@ -205,7 +205,7 @@ _bt_spool(BTItem btitem, BTSpool *btspool)
void
_bt_leafbuild(BTSpool *btspool, BTSpool *btspool2)
{
BTWriteState wstate;
BTWriteState wstate;

#ifdef BTREE_BUILD_STATS
if (log_btree_build_stats)
@@ -220,6 +220,7 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2)
tuplesort_performsort(btspool2->sortstate);

wstate.index = btspool->index;

/*
* We need to log index creation in WAL iff WAL archiving is enabled
* AND it's not a temp index.
@@ -229,7 +230,7 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2)
/* reserve the metapage */
wstate.btws_pages_alloced = BTREE_METAPAGE + 1;
wstate.btws_pages_written = 0;
wstate.btws_zeropage = NULL; /* until needed */
wstate.btws_zeropage = NULL; /* until needed */

_bt_load(&wstate, btspool, btspool2);
}
@@ -246,7 +247,7 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2)
static Page
_bt_blnewpage(uint32 level)
{
Page page;
Page page;
BTPageOpaque opaque;

page = (Page) palloc(BLCKSZ);
@@ -313,8 +314,8 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno)
* If we have to write pages nonsequentially, fill in the space with
* zeroes until we come back and overwrite. This is not logically
* necessary on standard Unix filesystems (unwritten space will read
* as zeroes anyway), but it should help to avoid fragmentation.
* The dummy pages aren't WAL-logged though.
* as zeroes anyway), but it should help to avoid fragmentation. The
* dummy pages aren't WAL-logged though.
*/
while (blkno > wstate->btws_pages_written)
{
@@ -326,9 +327,9 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno)
}

/*
* Now write the page. We say isTemp = true even if it's not a
* temp index, because there's no need for smgr to schedule an fsync
* for this write; we'll do it ourselves before ending the build.
* Now write the page. We say isTemp = true even if it's not a temp
* index, because there's no need for smgr to schedule an fsync for
* this write; we'll do it ourselves before ending the build.
*/
smgrwrite(wstate->index->rd_smgr, blkno, (char *) page, true);

@@ -468,7 +469,7 @@ static void
_bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti)
{
Page npage;
BlockNumber nblkno;
BlockNumber nblkno;
OffsetNumber last_off;
Size pgspc;
Size btisz;
@@ -506,7 +507,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti)
* already. Finish off the page and write it out.
*/
Page opage = npage;
BlockNumber oblkno = nblkno;
BlockNumber oblkno = nblkno;
ItemId ii;
ItemId hii;
BTItem obti;
@@ -539,8 +540,8 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti)
((PageHeader) opage)->pd_lower -= sizeof(ItemIdData);

/*
* Link the old page into its parent, using its minimum key. If
* we don't have a parent, we have to create one; this adds a new
* Link the old page into its parent, using its minimum key. If we
* don't have a parent, we have to create one; this adds a new
* btree level.
*/
if (state->btps_next == NULL)
@@ -572,8 +573,8 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti)
}

/*
* Write out the old page. We never need to touch it again,
* so we can free the opage workspace too.
* Write out the old page. We never need to touch it again, so we
* can free the opage workspace too.
*/
_bt_blwritepage(wstate, opage, oblkno);

@@ -613,7 +614,7 @@ static void
_bt_uppershutdown(BTWriteState *wstate, BTPageState *state)
{
BTPageState *s;
BlockNumber rootblkno = P_NONE;
BlockNumber rootblkno = P_NONE;
uint32 rootlevel = 0;
Page metapage;

@@ -663,9 +664,9 @@ _bt_uppershutdown(BTWriteState *wstate, BTPageState *state)

/*
* As the last step in the process, construct the metapage and make it
* point to the new root (unless we had no data at all, in which case it's
* set to point to "P_NONE"). This changes the index to the "valid"
* state by filling in a valid magic number in the metapage.
* point to the new root (unless we had no data at all, in which case
* it's set to point to "P_NONE"). This changes the index to the
* "valid" state by filling in a valid magic number in the metapage.
*/
metapage = (Page) palloc(BLCKSZ);
_bt_initmetapage(metapage, rootblkno, rootlevel);
@@ -744,7 +745,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)

compare = DatumGetInt32(FunctionCall2(&entry->sk_func,
attrDatum1,
attrDatum2));
attrDatum2));
if (compare > 0)
{
load1 = false;
@@ -768,7 +769,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
if (should_free)
pfree((void *) bti);
bti = (BTItem) tuplesort_getindextuple(btspool->sortstate,
true, &should_free);
true, &should_free);
}
else
{
@@ -776,7 +777,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
if (should_free2)
pfree((void *) bti2);
bti2 = (BTItem) tuplesort_getindextuple(btspool2->sortstate,
true, &should_free2);
true, &should_free2);
}
}
_bt_freeskey(indexScanKey);
@@ -785,7 +786,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
{
/* merge is unnecessary */
while ((bti = (BTItem) tuplesort_getindextuple(btspool->sortstate,
true, &should_free)) != NULL)
true, &should_free)) != NULL)
{
/* When we see first tuple, create first index page */
if (state == NULL)
@@ -802,18 +803,18 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)

/*
* If the index isn't temp, we must fsync it down to disk before it's
* safe to commit the transaction. (For a temp index we don't care
* safe to commit the transaction. (For a temp index we don't care
* since the index will be uninteresting after a crash anyway.)
*
* It's obvious that we must do this when not WAL-logging the build.
* It's less obvious that we have to do it even if we did WAL-log the
* index pages. The reason is that since we're building outside
* shared buffers, a CHECKPOINT occurring during the build has no way
* to flush the previously written data to disk (indeed it won't know
* the index even exists). A crash later on would replay WAL from the
* It's obvious that we must do this when not WAL-logging the build. It's
* less obvious that we have to do it even if we did WAL-log the index
* pages. The reason is that since we're building outside shared
* buffers, a CHECKPOINT occurring during the build has no way to
* flush the previously written data to disk (indeed it won't know the
* index even exists). A crash later on would replay WAL from the
* checkpoint, therefore it wouldn't replay our earlier WAL entries.
* If we do not fsync those pages here, they might still not be on disk
* when the crash occurs.
* If we do not fsync those pages here, they might still not be on
* disk when the crash occurs.
*/
if (!wstate->index->rd_istemp)
smgrimmedsync(wstate->index->rd_smgr);

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.59 2004/08/29 04:12:21 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.60 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -48,8 +48,8 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
bool null;

/*
* We can use the cached (default) support procs since no cross-type
* comparison can be needed.
* We can use the cached (default) support procs since no
* cross-type comparison can be needed.
*/
procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
arg = index_getattr(itup, i + 1, itupdesc, &null);
@@ -68,7 +68,7 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
/*
* _bt_mkscankey_nodata
* Build a scan key that contains comparator routines appropriate to
* the key datatypes, but no comparison data. The comparison data
* the key datatypes, but no comparison data. The comparison data
* ultimately used must match the key datatypes.
*
* The result cannot be used with _bt_compare(). Currently this
@@ -93,8 +93,8 @@ _bt_mkscankey_nodata(Relation rel)
FmgrInfo *procinfo;

/*
* We can use the cached (default) support procs since no cross-type
* comparison can be needed.
* We can use the cached (default) support procs since no
* cross-type comparison can be needed.
*/
procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
ScanKeyEntryInitializeWithInfo(&skey[i],
@@ -163,12 +163,12 @@ _bt_formitem(IndexTuple itup)
* _bt_preprocess_keys() -- Preprocess scan keys
*
* The caller-supplied keys (in scan->keyData[]) are copied to
* so->keyData[] with possible transformation. scan->numberOfKeys is
* so->keyData[] with possible transformation. scan->numberOfKeys is
* the number of input keys, so->numberOfKeys gets the number of output
* keys (possibly less, never greater).
*
* The primary purpose of this routine is to discover how many scan keys
* must be satisfied to continue the scan. It also attempts to eliminate
* must be satisfied to continue the scan. It also attempts to eliminate
* redundant keys and detect contradictory keys. At present, redundant and
* contradictory keys can only be detected for same-data-type comparisons,
* but that's the usual case so it seems worth doing.
@@ -198,7 +198,7 @@ _bt_formitem(IndexTuple itup)
* or one or two boundary-condition keys for each attr.) However, we can
* only detect redundant keys when the right-hand datatypes are all equal
* to the index datatype, because we do not know suitable operators for
* comparing right-hand values of two different datatypes. (In theory
* comparing right-hand values of two different datatypes. (In theory
* we could handle comparison of a RHS of the index datatype with a RHS of
* another type, but that seems too much pain for too little gain.) So,
* keys whose operator has a nondefault subtype (ie, its RHS is not of the
@@ -285,9 +285,9 @@ _bt_preprocess_keys(IndexScanDesc scan)
*
* xform[i] points to the currently best scan key of strategy type i+1,
* if any is found with a default operator subtype; it is NULL if we
* haven't yet found such a key for this attr. Scan keys of nondefault
* subtypes are transferred to the output with no processing except for
* noting if they are of "=" type.
* haven't yet found such a key for this attr. Scan keys of
* nondefault subtypes are transferred to the output with no
* processing except for noting if they are of "=" type.
*/
attno = 1;
memset(xform, 0, sizeof(xform));
@@ -361,7 +361,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
/*
* If no "=" for this key, we're done with required keys
*/
if (! hasOtherTypeEqual)
if (!hasOtherTypeEqual)
allEqualSoFar = false;
}

@@ -369,8 +369,8 @@ _bt_preprocess_keys(IndexScanDesc scan)
if (xform[BTLessStrategyNumber - 1]
&& xform[BTLessEqualStrategyNumber - 1])
{
ScanKey lt = xform[BTLessStrategyNumber - 1];
ScanKey le = xform[BTLessEqualStrategyNumber - 1];
ScanKey lt = xform[BTLessStrategyNumber - 1];
ScanKey le = xform[BTLessEqualStrategyNumber - 1];

test = FunctionCall2(&le->sk_func,
lt->sk_argument,
@@ -385,8 +385,8 @@ _bt_preprocess_keys(IndexScanDesc scan)
if (xform[BTGreaterStrategyNumber - 1]
&& xform[BTGreaterEqualStrategyNumber - 1])
{
ScanKey gt = xform[BTGreaterStrategyNumber - 1];
ScanKey ge = xform[BTGreaterEqualStrategyNumber - 1];
ScanKey gt = xform[BTGreaterStrategyNumber - 1];
ScanKey ge = xform[BTGreaterEqualStrategyNumber - 1];

test = FunctionCall2(&ge->sk_func,
gt->sk_argument,
@@ -545,21 +545,23 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
{
/*
* Tuple fails this qual. If it's a required qual, then we
* may be able to conclude no further tuples will pass, either.
* We have to look at the scan direction and the qual type.
* may be able to conclude no further tuples will pass,
* either. We have to look at the scan direction and the qual
* type.
*
* Note: the only case in which we would keep going after failing
* a required qual is if there are partially-redundant quals that
* _bt_preprocess_keys() was unable to eliminate. For example,
* given "x > 4 AND x > 10" where both are cross-type comparisons
* and so not removable, we might start the scan at the x = 4
* boundary point. The "x > 10" condition will fail until we
* pass x = 10, but we must not stop the scan on its account.
* a required qual is if there are partially-redundant quals
* that _bt_preprocess_keys() was unable to eliminate. For
* example, given "x > 4 AND x > 10" where both are cross-type
* comparisons and so not removable, we might start the scan
* at the x = 4 boundary point. The "x > 10" condition will
* fail until we pass x = 10, but we must not stop the scan on
* its account.
*
* Note: because we stop the scan as soon as any required equality
* qual fails, it is critical that equality quals be used for the
* initial positioning in _bt_first() when they are available.
* See comments in _bt_first().
* Note: because we stop the scan as soon as any required
* equality qual fails, it is critical that equality quals be
* used for the initial positioning in _bt_first() when they
* are available. See comments in _bt_first().
*/
if (ikey < so->numberOfRequiredKeys)
{

@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.17 2004/08/29 04:12:21 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.18 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -770,7 +770,7 @@ static void
out_target(char *buf, xl_btreetid *target)
{
sprintf(buf + strlen(buf), "rel %u/%u/%u; tid %u/%u",
target->node.spcNode, target->node.dbNode, target->node.relNode,
target->node.spcNode, target->node.dbNode, target->node.relNode,
ItemPointerGetBlockNumber(&(target->tid)),
ItemPointerGetOffsetNumber(&(target->tid)));
}

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/rtree/rtscan.c,v 1.54 2004/08/29 04:12:22 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/rtree/rtscan.c,v 1.55 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -123,7 +123,7 @@ rtrescan(PG_FUNCTION_ARGS)
Oid int_oper;
RegProcedure int_proc;

opclass = s->indexRelation->rd_index->indclass[attno-1];
opclass = s->indexRelation->rd_index->indclass[attno - 1];
int_strategy = RTMapToInternalOperator(s->keyData[i].sk_strategy);
int_oper = get_opclass_member(opclass,
s->keyData[i].sk_subtype,
@@ -280,14 +280,14 @@ rtdropscan(IndexScanDesc s)
void
ReleaseResources_rtree(void)
{
RTScanList l;
RTScanList prev;
RTScanList next;
RTScanList l;
RTScanList prev;
RTScanList next;

/*
* Note: this should be a no-op during normal query shutdown.
* However, in an abort situation ExecutorEnd is not called and so
* there may be open index scans to clean up.
* Note: this should be a no-op during normal query shutdown. However,
* in an abort situation ExecutorEnd is not called and so there may be
* open index scans to clean up.
*/
prev = NULL;


@@ -24,7 +24,7 @@
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.24 2004/08/29 04:12:23 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.25 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -62,6 +62,7 @@
* Link to shared-memory data structures for CLOG control
*/
static SlruCtlData ClogCtlData;

#define ClogCtl (&ClogCtlData)



@@ -48,7 +48,7 @@
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.20 2004/08/29 04:12:23 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.21 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,7 +79,7 @@
* segment and page numbers in SimpleLruTruncate (see PagePrecedes()).
*
* Note: this file currently assumes that segment file names will be four
* hex digits. This sets a lower bound on the segment size (64K transactions
* hex digits. This sets a lower bound on the segment size (64K transactions
* for 32-bit TransactionIds).
*/
#define SLRU_PAGES_PER_SEGMENT 32
@@ -96,9 +96,9 @@
*/
typedef struct SlruFlushData
{
int num_files; /* # files actually open */
int fd[NUM_SLRU_BUFFERS]; /* their FD's */
int segno[NUM_SLRU_BUFFERS]; /* their log seg#s */
int num_files; /* # files actually open */
int fd[NUM_SLRU_BUFFERS]; /* their FD's */
int segno[NUM_SLRU_BUFFERS]; /* their log seg#s */
} SlruFlushData;

/*
@@ -132,7 +132,7 @@ static int slru_errno;

static bool SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno);
static bool SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno,
SlruFlush fdata);
SlruFlush fdata);
static void SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid);
static int SlruSelectLRUPage(SlruCtl ctl, int pageno);

@@ -385,7 +385,7 @@ SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata)
/* If we failed, and we're in a flush, better close the files */
if (!ok && fdata)
{
int i;
int i;

for (i = 0; i < fdata->num_files; i++)
close(fdata->fd[i]);
@@ -511,7 +511,7 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruFlush fdata)
*/
if (fdata)
{
int i;
int i;

for (i = 0; i < fdata->num_files; i++)
{
@@ -527,16 +527,17 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruFlush fdata)
{
/*
* If the file doesn't already exist, we should create it. It is
* possible for this to need to happen when writing a page that's not
* first in its segment; we assume the OS can cope with that.
* (Note: it might seem that it'd be okay to create files only when
* SimpleLruZeroPage is called for the first page of a segment.
* However, if after a crash and restart the REDO logic elects to
* replay the log from a checkpoint before the latest one, then it's
* possible that we will get commands to set transaction status of
* transactions that have already been truncated from the commit log.
* Easiest way to deal with that is to accept references to
* nonexistent files here and in SlruPhysicalReadPage.)
* possible for this to need to happen when writing a page that's
* not first in its segment; we assume the OS can cope with that.
* (Note: it might seem that it'd be okay to create files only
* when SimpleLruZeroPage is called for the first page of a
* segment. However, if after a crash and restart the REDO logic
* elects to replay the log from a checkpoint before the latest
* one, then it's possible that we will get commands to set
* transaction status of transactions that have already been
* truncated from the commit log. Easiest way to deal with that is
* to accept references to nonexistent files here and in
* SlruPhysicalReadPage.)
*/
SlruFileName(ctl, path, segno);
fd = BasicOpenFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
@@ -648,36 +649,36 @@ SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not access status of transaction %u", xid),
errdetail("could not seek in file \"%s\" to offset %u: %m",
path, offset)));
errdetail("could not seek in file \"%s\" to offset %u: %m",
path, offset)));
break;
case SLRU_READ_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not access status of transaction %u", xid),
errdetail("could not read from file \"%s\" at offset %u: %m",
path, offset)));
errdetail("could not read from file \"%s\" at offset %u: %m",
path, offset)));
break;
case SLRU_WRITE_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not access status of transaction %u", xid),
errdetail("could not write to file \"%s\" at offset %u: %m",
path, offset)));
errdetail("could not write to file \"%s\" at offset %u: %m",
path, offset)));
break;
case SLRU_FSYNC_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not access status of transaction %u", xid),
errdetail("could not fsync file \"%s\": %m",
path)));
errdetail("could not fsync file \"%s\": %m",
path)));
break;
case SLRU_CLOSE_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not access status of transaction %u", xid),
errdetail("could not close file \"%s\": %m",
path)));
errdetail("could not close file \"%s\": %m",
path)));
break;
default:
/* can't get here, we trust */
@@ -841,8 +842,8 @@ SimpleLruTruncate(SlruCtl ctl, int cutoffPage)
/*
* Scan shared memory and remove any pages preceding the cutoff page,
* to ensure we won't rewrite them later. (Since this is normally
* called in or just after a checkpoint, any dirty pages should
* have been flushed already ... we're just being extra careful here.)
* called in or just after a checkpoint, any dirty pages should have
* been flushed already ... we're just being extra careful here.)
*/
LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);

@@ -952,8 +953,11 @@ SlruScanDirectory(SlruCtl ctl, int cutoffPage, bool doDeletions)
errno = 0;
}
#ifdef WIN32
/* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
not in released version */

/*
* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
* not in released version
*/
if (GetLastError() == ERROR_NO_MORE_FILES)
errno = 0;
#endif

@@ -5,7 +5,7 @@
*
* The pg_subtrans manager is a pg_clog-like manager that stores the parent
* transaction Id for each transaction. It is a fundamental part of the
* nested transactions implementation. A main transaction has a parent
* nested transactions implementation. A main transaction has a parent
* of InvalidTransactionId, and each subtransaction has its immediate parent.
* The tree can easily be walked from child to parent, but not in the
* opposite direction.
@@ -22,7 +22,7 @@
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.4 2004/08/29 04:12:23 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.5 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,6 +57,7 @@
* Link to shared-memory data structures for SUBTRANS control
*/
static SlruCtlData SubTransCtlData;

#define SubTransCtl (&SubTransCtlData)


@@ -101,7 +102,7 @@ SubTransGetParent(TransactionId xid)
int entryno = TransactionIdToEntry(xid);
int slotno;
TransactionId *ptr;
TransactionId parent;
TransactionId parent;

/* Can't ask about stuff that might not be around anymore */
Assert(TransactionIdFollowsOrEquals(xid, RecentXmin));
@@ -139,7 +140,7 @@ TransactionId
SubTransGetTopmostTransaction(TransactionId xid)
{
TransactionId parentXid = xid,
previousXid = xid;
previousXid = xid;

/* Can't ask about stuff that might not be around anymore */
Assert(TransactionIdFollowsOrEquals(xid, RecentXmin));
@@ -185,7 +186,7 @@ SUBTRANSShmemInit(void)
* must have been called already.)
*
* Note: it's not really necessary to create the initial segment now,
* since slru.c would create it on first write anyway. But we may as well
* since slru.c would create it on first write anyway. But we may as well
* do it to be sure the directory is set up correctly.
*/
void
@@ -229,10 +230,11 @@ StartupSUBTRANS(void)
int startPage;

/*
* Since we don't expect pg_subtrans to be valid across crashes,
* we initialize the currently-active page to zeroes during startup.
* Since we don't expect pg_subtrans to be valid across crashes, we
* initialize the currently-active page to zeroes during startup.
* Whenever we advance into a new page, ExtendSUBTRANS will likewise
* zero the new page without regard to whatever was previously on disk.
* zero the new page without regard to whatever was previously on
* disk.
*/
LWLockAcquire(SubtransControlLock, LW_EXCLUSIVE);

@@ -251,8 +253,8 @@ ShutdownSUBTRANS(void)
/*
* Flush dirty SUBTRANS pages to disk
*
* This is not actually necessary from a correctness point of view.
* We do it merely as a debugging aid.
* This is not actually necessary from a correctness point of view. We do
* it merely as a debugging aid.
*/
SimpleLruFlush(SubTransCtl, false);
}
@@ -266,8 +268,8 @@ CheckPointSUBTRANS(void)
/*
* Flush dirty SUBTRANS pages to disk
*
* This is not actually necessary from a correctness point of view.
* We do it merely to improve the odds that writing of dirty pages is done
* This is not actually necessary from a correctness point of view. We do
* it merely to improve the odds that writing of dirty pages is done
* by the checkpoint process and not by backends.
*/
SimpleLruFlush(SubTransCtl, true);

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.60 2004/08/29 04:12:23 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.61 2004/08/29 05:06:40 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@@ -126,7 +126,7 @@ TransactionLogUpdate(TransactionId transactionId, /* trans id to update */
static void
TransactionLogMultiUpdate(int nxids, TransactionId *xids, XidStatus status)
{
int i;
int i;

Assert(nxids != 0);

@@ -199,9 +199,10 @@ TransactionIdDidCommit(TransactionId transactionId)
return true;

/*
* If it's marked subcommitted, we have to check the parent recursively.
* However, if it's older than RecentXmin, we can't look at pg_subtrans;
* instead assume that the parent crashed without cleaning up its children.
* If it's marked subcommitted, we have to check the parent
* recursively. However, if it's older than RecentXmin, we can't look
* at pg_subtrans; instead assume that the parent crashed without
* cleaning up its children.
*/
if (xidstatus == TRANSACTION_STATUS_SUB_COMMITTED)
{
@@ -214,7 +215,7 @@ TransactionIdDidCommit(TransactionId transactionId)
return TransactionIdDidCommit(parentXid);
}

/*
/*
* It's not committed.
*/
return false;
@@ -247,9 +248,10 @@ TransactionIdDidAbort(TransactionId transactionId)
return true;

/*
* If it's marked subcommitted, we have to check the parent recursively.
* However, if it's older than RecentXmin, we can't look at pg_subtrans;
* instead assume that the parent crashed without cleaning up its children.
* If it's marked subcommitted, we have to check the parent
* recursively. However, if it's older than RecentXmin, we can't look
* at pg_subtrans; instead assume that the parent crashed without
* cleaning up its children.
*/
if (xidstatus == TRANSACTION_STATUS_SUB_COMMITTED)
{

@@ -6,7 +6,7 @@
* Copyright (c) 2000-2004, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.58 2004/08/29 04:12:23 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.59 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -47,9 +47,9 @@ GetNewTransactionId(bool isSubXact)
xid = ShmemVariableCache->nextXid;

/*
* If we are allocating the first XID of a new page of the commit
* log, zero out that commit-log page before returning. We must do
* this while holding XidGenLock, else another xact could acquire and
* If we are allocating the first XID of a new page of the commit log,
* zero out that commit-log page before returning. We must do this
* while holding XidGenLock, else another xact could acquire and
* commit a later XID before we zero the page. Fortunately, a page of
* the commit log holds 32K or more transactions, so we don't have to
* do this very often.
@@ -61,17 +61,18 @@ GetNewTransactionId(bool isSubXact)

/*
* Now advance the nextXid counter. This must not happen until after
* we have successfully completed ExtendCLOG() --- if that routine fails,
* we want the next incoming transaction to try it again. We cannot
* assign more XIDs until there is CLOG space for them.
* we have successfully completed ExtendCLOG() --- if that routine
* fails, we want the next incoming transaction to try it again. We
* cannot assign more XIDs until there is CLOG space for them.
*/
TransactionIdAdvance(ShmemVariableCache->nextXid);

/*
* We must store the new XID into the shared PGPROC array before releasing
* XidGenLock. This ensures that when GetSnapshotData calls
* We must store the new XID into the shared PGPROC array before
* releasing XidGenLock. This ensures that when GetSnapshotData calls
* ReadNewTransactionId, all active XIDs before the returned value of
* nextXid are already present in PGPROC. Else we have a race condition.
* nextXid are already present in PGPROC. Else we have a race
* condition.
*
* XXX by storing xid into MyProc without acquiring SInvalLock, we are
* relying on fetch/store of an xid to be atomic, else other backends
@@ -86,19 +87,19 @@ GetNewTransactionId(bool isSubXact)
*
* A solution to the atomic-store problem would be to give each PGPROC
* its own spinlock used only for fetching/storing that PGPROC's xid
* and related fields. (SInvalLock would then mean primarily that
* and related fields. (SInvalLock would then mean primarily that
* PGPROCs couldn't be added/removed while holding the lock.)
*
* If there's no room to fit a subtransaction XID into PGPROC, set the
* cache-overflowed flag instead. This forces readers to look in
* pg_subtrans to map subtransaction XIDs up to top-level XIDs.
* There is a race-condition window, in that the new XID will not
* appear as running until its parent link has been placed into
* pg_subtrans. However, that will happen before anyone could possibly
* have a reason to inquire about the status of the XID, so it seems
* OK. (Snapshots taken during this window *will* include the parent
* XID, so they will deliver the correct answer later on when someone
* does have a reason to inquire.)
* pg_subtrans to map subtransaction XIDs up to top-level XIDs. There
* is a race-condition window, in that the new XID will not appear as
* running until its parent link has been placed into pg_subtrans.
* However, that will happen before anyone could possibly have a
* reason to inquire about the status of the XID, so it seems OK.
* (Snapshots taken during this window *will* include the parent XID,
* so they will deliver the correct answer later on when someone does
* have a reason to inquire.)
*/
if (MyProc != NULL)
{
@@ -112,9 +113,7 @@ GetNewTransactionId(bool isSubXact)
MyProc->subxids.nxids++;
}
else
{
MyProc->subxids.overflowed = true;
}
}
}


File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/access/transam/xlogutils.c,v 1.33 2004/08/29 04:12:23 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/xlogutils.c,v 1.34 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -212,11 +212,11 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
res->reldata.rd_node = rnode;

/*
* We set up the lockRelId in case anything tries to lock the dummy
* relation. Note that this is fairly bogus since relNode may be
* different from the relation's OID. It shouldn't really matter
* though, since we are presumably running by ourselves and can't
* have any lock conflicts ...
* We set up the lockRelId in case anything tries to lock the
* dummy relation. Note that this is fairly bogus since relNode
* may be different from the relation's OID. It shouldn't really
* matter though, since we are presumably running by ourselves and
* can't have any lock conflicts ...
*/
res->reldata.rd_lockInfo.lockRelId.dbId = rnode.dbNode;
res->reldata.rd_lockInfo.lockRelId.relId = rnode.relNode;
@@ -234,14 +234,15 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)

res->reldata.rd_targblock = InvalidBlockNumber;
res->reldata.rd_smgr = smgropen(res->reldata.rd_node);

/*
* Create the target file if it doesn't already exist. This lets
* us cope if the replay sequence contains writes to a relation
* that is later deleted. (The original coding of this routine
* would instead return NULL, causing the writes to be suppressed.
* But that seems like it risks losing valuable data if the filesystem
* loses an inode during a crash. Better to write the data until we
* are actually told to delete the file.)
* But that seems like it risks losing valuable data if the
* filesystem loses an inode during a crash. Better to write the
* data until we are actually told to delete the file.)
*/
smgrcreate(res->reldata.rd_smgr, res->reldata.rd_istemp, true);
}

@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.192 2004/08/29 04:12:25 momjian Exp $
* $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.193 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -111,46 +111,46 @@ struct typinfo

static const struct typinfo TypInfo[] = {
{"bool", BOOLOID, 0, 1, true, 'c', 'p',
F_BOOLIN, F_BOOLOUT},
F_BOOLIN, F_BOOLOUT},
{"bytea", BYTEAOID, 0, -1, false, 'i', 'x',
F_BYTEAIN, F_BYTEAOUT},
F_BYTEAIN, F_BYTEAOUT},
{"char", CHAROID, 0, 1, true, 'c', 'p',
F_CHARIN, F_CHAROUT},
F_CHARIN, F_CHAROUT},
{"name", NAMEOID, CHAROID, NAMEDATALEN, false, 'i', 'p',
F_NAMEIN, F_NAMEOUT},
F_NAMEIN, F_NAMEOUT},
{"int2", INT2OID, 0, 2, true, 's', 'p',
F_INT2IN, F_INT2OUT},
F_INT2IN, F_INT2OUT},
{"int4", INT4OID, 0, 4, true, 'i', 'p',
F_INT4IN, F_INT4OUT},
F_INT4IN, F_INT4OUT},
{"regproc", REGPROCOID, 0, 4, true, 'i', 'p',
F_REGPROCIN, F_REGPROCOUT},
F_REGPROCIN, F_REGPROCOUT},
{"regclass", REGCLASSOID, 0, 4, true, 'i', 'p',
F_REGCLASSIN, F_REGCLASSOUT},
F_REGCLASSIN, F_REGCLASSOUT},
{"regtype", REGTYPEOID, 0, 4, true, 'i', 'p',
F_REGTYPEIN, F_REGTYPEOUT},
F_REGTYPEIN, F_REGTYPEOUT},
{"text", TEXTOID, 0, -1, false, 'i', 'x',
F_TEXTIN, F_TEXTOUT},
F_TEXTIN, F_TEXTOUT},
{"oid", OIDOID, 0, 4, true, 'i', 'p',
F_OIDIN, F_OIDOUT},
F_OIDIN, F_OIDOUT},
{"tid", TIDOID, 0, 6, false, 's', 'p',
F_TIDIN, F_TIDOUT},
F_TIDIN, F_TIDOUT},
{"xid", XIDOID, 0, 4, true, 'i', 'p',
F_XIDIN, F_XIDOUT},
F_XIDIN, F_XIDOUT},
{"cid", CIDOID, 0, 4, true, 'i', 'p',
F_CIDIN, F_CIDOUT},
F_CIDIN, F_CIDOUT},
{"int2vector", INT2VECTOROID, INT2OID, INDEX_MAX_KEYS * 2, false, 's', 'p',
F_INT2VECTORIN, F_INT2VECTOROUT},
F_INT2VECTORIN, F_INT2VECTOROUT},
{"oidvector", OIDVECTOROID, OIDOID, INDEX_MAX_KEYS * 4, false, 'i', 'p',
F_OIDVECTORIN, F_OIDVECTOROUT},
F_OIDVECTORIN, F_OIDVECTOROUT},
{"_int4", INT4ARRAYOID, INT4OID, -1, false, 'i', 'x',
F_ARRAY_IN, F_ARRAY_OUT},
F_ARRAY_IN, F_ARRAY_OUT},
{"_text", 1009, TEXTOID, -1, false, 'i', 'x',
F_ARRAY_IN, F_ARRAY_OUT},
F_ARRAY_IN, F_ARRAY_OUT},
{"_aclitem", 1034, ACLITEMOID, -1, false, 'i', 'x',
F_ARRAY_IN, F_ARRAY_OUT}
F_ARRAY_IN, F_ARRAY_OUT}
};

static const int n_types = sizeof(TypInfo) / sizeof(struct typinfo);
static const int n_types = sizeof(TypInfo) / sizeof(struct typinfo);

struct typmap
{ /* a hack */
@@ -498,13 +498,13 @@ static void
usage(void)
{
write_stderr("Usage:\n"
" postgres -boot [OPTION]... DBNAME\n"
" -c NAME=VALUE set run-time parameter\n"
" -d 1-5 debug level\n"
" -D datadir data directory\n"
" -F turn off fsync\n"
" -o file send debug output to file\n"
" -x num internal use\n");
" postgres -boot [OPTION]... DBNAME\n"
" -c NAME=VALUE set run-time parameter\n"
" -d 1-5 debug level\n"
" -D datadir data directory\n"
" -F turn off fsync\n"
" -o file send debug output to file\n"
" -x num internal use\n");

proc_exit(1);
}

@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.106 2004/08/29 04:12:26 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.107 2004/08/29 05:06:41 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* See acl.h.
|
||||
@@ -73,7 +73,7 @@ dumpacl(Acl *acl)
|
||||
* Determine the effective grantor ID for a GRANT or REVOKE operation.
|
||||
*
|
||||
* Ordinarily this is just the current user, but when a superuser does
|
||||
* GRANT or REVOKE, we pretend he is the object owner. This ensures that
|
||||
* GRANT or REVOKE, we pretend he is the object owner. This ensures that
|
||||
* all granted privileges appear to flow from the object owner, and there
|
||||
* are never multiple "original sources" of a privilege.
|
||||
*/
|
||||
@@ -122,25 +122,25 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,
|
||||
foreach(j, grantees)
|
||||
{
|
||||
PrivGrantee *grantee = (PrivGrantee *) lfirst(j);
|
||||
AclItem aclitem;
|
||||
AclItem aclitem;
|
||||
uint32 idtype;
|
||||
Acl *newer_acl;
|
||||
|
||||
if (grantee->username)
|
||||
{
|
||||
aclitem.ai_grantee = get_usesysid(grantee->username);
|
||||
aclitem. ai_grantee = get_usesysid(grantee->username);
|
||||
|
||||
idtype = ACL_IDTYPE_UID;
|
||||
}
|
||||
else if (grantee->groupname)
|
||||
{
|
||||
aclitem.ai_grantee = get_grosysid(grantee->groupname);
|
||||
aclitem. ai_grantee = get_grosysid(grantee->groupname);
|
||||
|
||||
idtype = ACL_IDTYPE_GID;
|
||||
}
|
||||
else
|
||||
{
|
||||
aclitem.ai_grantee = ACL_ID_WORLD;
|
||||
aclitem. ai_grantee = ACL_ID_WORLD;
|
||||
|
||||
idtype = ACL_IDTYPE_WORLD;
|
||||
}
|
||||
@@ -157,18 +157,19 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,
|
||||
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
|
||||
errmsg("grant options can only be granted to individual users")));
|
||||
|
||||
aclitem.ai_grantor = grantor_uid;
|
||||
aclitem. ai_grantor = grantor_uid;
|
||||
|
||||
/*
|
||||
* The asymmetry in the conditions here comes from the spec. In
|
||||
* GRANT, the grant_option flag signals WITH GRANT OPTION, which means
|
||||
* to grant both the basic privilege and its grant option. But in
|
||||
* REVOKE, plain revoke revokes both the basic privilege and its
|
||||
* grant option, while REVOKE GRANT OPTION revokes only the option.
|
||||
* GRANT, the grant_option flag signals WITH GRANT OPTION, which
|
||||
* means to grant both the basic privilege and its grant option.
|
||||
* But in REVOKE, plain revoke revokes both the basic privilege
|
||||
* and its grant option, while REVOKE GRANT OPTION revokes only
|
||||
* the option.
|
||||
*/
|
||||
ACLITEM_SET_PRIVS_IDTYPE(aclitem,
|
||||
(is_grant || !grant_option) ? privileges : ACL_NO_RIGHTS,
|
||||
(!is_grant || grant_option) ? privileges : ACL_NO_RIGHTS,
|
||||
(is_grant || !grant_option) ? privileges : ACL_NO_RIGHTS,
|
||||
(!is_grant || grant_option) ? privileges : ACL_NO_RIGHTS,
|
||||
idtype);
|
||||
|
||||
newer_acl = aclupdate(new_acl, &aclitem, modechg, owner_uid, behavior);
|
||||
@@ -318,11 +319,11 @@ ExecuteGrantStmt_Relation(GrantStmt *stmt)
|
||||
|
||||
/*
|
||||
* Restrict the operation to what we can actually grant or revoke,
|
||||
* and issue a warning if appropriate. (For REVOKE this isn't quite
|
||||
* what the spec says to do: the spec seems to want a warning only
|
||||
* if no privilege bits actually change in the ACL. In practice
|
||||
* that behavior seems much too noisy, as well as inconsistent with
|
||||
* the GRANT case.)
|
||||
* and issue a warning if appropriate. (For REVOKE this isn't
|
||||
* quite what the spec says to do: the spec seems to want a
|
||||
* warning only if no privilege bits actually change in the ACL.
|
||||
* In practice that behavior seems much too noisy, as well as
|
||||
* inconsistent with the GRANT case.)
|
||||
*/
|
||||
this_privileges = privileges & my_goptions;
|
||||
if (stmt->is_grant)
|
||||
@@ -476,11 +477,11 @@ ExecuteGrantStmt_Database(GrantStmt *stmt)
|
||||
|
||||
/*
|
||||
* Restrict the operation to what we can actually grant or revoke,
|
||||
* and issue a warning if appropriate. (For REVOKE this isn't quite
|
||||
* what the spec says to do: the spec seems to want a warning only
|
||||
* if no privilege bits actually change in the ACL. In practice
|
||||
* that behavior seems much too noisy, as well as inconsistent with
|
||||
* the GRANT case.)
|
||||
* and issue a warning if appropriate. (For REVOKE this isn't
|
||||
* quite what the spec says to do: the spec seems to want a
|
||||
* warning only if no privilege bits actually change in the ACL.
|
||||
* In practice that behavior seems much too noisy, as well as
|
||||
* inconsistent with the GRANT case.)
|
||||
*/
|
||||
this_privileges = privileges & my_goptions;
|
||||
if (stmt->is_grant)
|
||||
@@ -630,11 +631,11 @@ ExecuteGrantStmt_Function(GrantStmt *stmt)
|
||||
|
||||
/*
|
||||
* Restrict the operation to what we can actually grant or revoke,
|
||||
* and issue a warning if appropriate. (For REVOKE this isn't quite
|
||||
* what the spec says to do: the spec seems to want a warning only
|
||||
* if no privilege bits actually change in the ACL. In practice
|
||||
* that behavior seems much too noisy, as well as inconsistent with
|
||||
* the GRANT case.)
|
||||
* and issue a warning if appropriate. (For REVOKE this isn't
|
||||
* quite what the spec says to do: the spec seems to want a
|
||||
* warning only if no privilege bits actually change in the ACL.
|
||||
* In practice that behavior seems much too noisy, as well as
|
||||
* inconsistent with the GRANT case.)
|
||||
*/
|
||||
this_privileges = privileges & my_goptions;
|
||||
if (stmt->is_grant)
|
||||
@@ -761,7 +762,7 @@ ExecuteGrantStmt_Language(GrantStmt *stmt)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
|
||||
errmsg("language \"%s\" is not trusted", langname),
|
||||
errhint("Only superusers may use untrusted languages.")));
|
||||
errhint("Only superusers may use untrusted languages.")));
|
||||
|
||||
/*
|
||||
* Note: for now, languages are treated as owned by the bootstrap
|
||||
@@ -793,11 +794,11 @@ ExecuteGrantStmt_Language(GrantStmt *stmt)
|
||||
|
||||
/*
|
||||
* Restrict the operation to what we can actually grant or revoke,
|
||||
* and issue a warning if appropriate. (For REVOKE this isn't quite
|
||||
* what the spec says to do: the spec seems to want a warning only
|
||||
* if no privilege bits actually change in the ACL. In practice
|
||||
* that behavior seems much too noisy, as well as inconsistent with
|
||||
* the GRANT case.)
|
||||
* and issue a warning if appropriate. (For REVOKE this isn't
|
||||
* quite what the spec says to do: the spec seems to want a
|
||||
* warning only if no privilege bits actually change in the ACL.
|
||||
* In practice that behavior seems much too noisy, as well as
|
||||
* inconsistent with the GRANT case.)
|
||||
*/
|
||||
this_privileges = privileges & my_goptions;
|
||||
if (stmt->is_grant)
|
||||
@@ -946,11 +947,11 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt)
|
||||
|
||||
/*
|
||||
* Restrict the operation to what we can actually grant or revoke,
|
||||
* and issue a warning if appropriate. (For REVOKE this isn't quite
|
||||
* what the spec says to do: the spec seems to want a warning only
|
||||
* if no privilege bits actually change in the ACL. In practice
|
||||
* that behavior seems much too noisy, as well as inconsistent with
|
||||
* the GRANT case.)
|
||||
* and issue a warning if appropriate. (For REVOKE this isn't
|
||||
* quite what the spec says to do: the spec seems to want a
|
||||
* warning only if no privilege bits actually change in the ACL.
|
||||
* In practice that behavior seems much too noisy, as well as
|
||||
* inconsistent with the GRANT case.)
|
||||
*/
|
||||
this_privileges = privileges & my_goptions;
|
||||
if (stmt->is_grant)
|
||||
@@ -1039,8 +1040,8 @@ ExecuteGrantStmt_Tablespace(GrantStmt *stmt)
|
||||
if (priv & ~((AclMode) ACL_ALL_RIGHTS_TABLESPACE))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
|
||||
errmsg("invalid privilege type %s for tablespace",
|
||||
privilege_to_string(priv))));
|
||||
errmsg("invalid privilege type %s for tablespace",
|
||||
privilege_to_string(priv))));
|
||||
privileges |= priv;
|
||||
}
|
||||
}
|
||||
@@ -1076,7 +1077,7 @@ ExecuteGrantStmt_Tablespace(GrantStmt *stmt)
|
||||
if (!HeapTupleIsValid(tuple))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_UNDEFINED_OBJECT),
|
||||
errmsg("tablespace \"%s\" does not exist", spcname)));
|
||||
errmsg("tablespace \"%s\" does not exist", spcname)));
|
||||
pg_tablespace_tuple = (Form_pg_tablespace) GETSTRUCT(tuple);
|
||||
|
||||
ownerId = pg_tablespace_tuple->spcowner;
|
||||
@@ -1105,11 +1106,11 @@ ExecuteGrantStmt_Tablespace(GrantStmt *stmt)
|
||||
|
||||
/*
|
||||
* Restrict the operation to what we can actually grant or revoke,
|
||||
* and issue a warning if appropriate. (For REVOKE this isn't quite
|
||||
* what the spec says to do: the spec seems to want a warning only
|
||||
* if no privilege bits actually change in the ACL. In practice
|
||||
* that behavior seems much too noisy, as well as inconsistent with
|
||||
* the GRANT case.)
|
||||
* and issue a warning if appropriate. (For REVOKE this isn't
|
||||
* quite what the spec says to do: the spec seems to want a
|
||||
* warning only if no privilege bits actually change in the ACL.
|
||||
* In practice that behavior seems much too noisy, as well as
|
||||
* inconsistent with the GRANT case.)
|
||||
*/
|
||||
this_privileges = privileges & my_goptions;
|
||||
if (stmt->is_grant)
|
||||
@@ -1389,11 +1390,12 @@ pg_class_aclmask(Oid table_oid, AclId userid,
|
||||
/*
|
||||
* Deny anyone permission to update a system catalog unless
|
||||
* pg_shadow.usecatupd is set. (This is to let superusers protect
|
||||
* themselves from themselves.) Also allow it if allowSystemTableMods.
|
||||
* themselves from themselves.) Also allow it if
|
||||
* allowSystemTableMods.
|
||||
*
|
||||
* As of 7.4 we have some updatable system views; those shouldn't
|
||||
* be protected in this way. Assume the view rules can take care
|
||||
* of themselves.
|
||||
* As of 7.4 we have some updatable system views; those shouldn't be
|
||||
* protected in this way. Assume the view rules can take care of
|
||||
* themselves.
|
||||
*/
|
||||
if ((mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE)) &&
|
||||
IsSystemClass(classForm) &&
|
||||
@@ -1648,23 +1650,23 @@ pg_namespace_aclmask(Oid nsp_oid, AclId userid,
|
||||
return mask;
|
||||
|
||||
/*
|
||||
* If we have been assigned this namespace as a temp namespace,
|
||||
* check to make sure we have CREATE TEMP permission on the database,
|
||||
* and if so act as though we have all standard (but not GRANT OPTION)
|
||||
* If we have been assigned this namespace as a temp namespace, check
|
||||
* to make sure we have CREATE TEMP permission on the database, and if
|
||||
* so act as though we have all standard (but not GRANT OPTION)
|
||||
* permissions on the namespace. If we don't have CREATE TEMP, act as
|
||||
* though we have only USAGE (and not CREATE) rights.
|
||||
*
|
||||
* This may seem redundant given the check in InitTempTableNamespace,
|
||||
* but it really isn't since current user ID may have changed since then.
|
||||
* This may seem redundant given the check in InitTempTableNamespace, but
|
||||
* it really isn't since current user ID may have changed since then.
|
||||
* The upshot of this behavior is that a SECURITY DEFINER function can
|
||||
* create temp tables that can then be accessed (if permission is granted)
|
||||
* by code in the same session that doesn't have permissions to create
|
||||
* temp tables.
|
||||
* create temp tables that can then be accessed (if permission is
|
||||
* granted) by code in the same session that doesn't have permissions
|
||||
* to create temp tables.
|
||||
*
|
||||
* XXX Would it be safe to ereport a special error message as
|
||||
* InitTempTableNamespace does? Returning zero here means we'll get a
|
||||
* generic "permission denied for schema pg_temp_N" message, which is not
|
||||
* remarkably user-friendly.
|
||||
* generic "permission denied for schema pg_temp_N" message, which is
|
||||
* not remarkably user-friendly.
|
||||
*/
|
||||
if (isTempNamespace(nsp_oid))
|
||||
{
|
||||
@@ -1731,8 +1733,8 @@ pg_tablespace_aclmask(Oid spc_oid, AclId userid,
|
||||
AclId ownerId;
|
||||
|
||||
/*
|
||||
* Only shared relations can be stored in global space; don't let
|
||||
* even superusers override this
|
||||
* Only shared relations can be stored in global space; don't let even
|
||||
* superusers override this
|
||||
*/
|
||||
if (spc_oid == GLOBALTABLESPACE_OID && !IsBootstrapProcessingMode())
|
||||
return 0;
|
||||
@@ -1756,7 +1758,7 @@ pg_tablespace_aclmask(Oid spc_oid, AclId userid,
|
||||
if (!HeapTupleIsValid(tuple))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_UNDEFINED_OBJECT),
|
||||
errmsg("tablespace with OID %u does not exist", spc_oid)));
|
||||
errmsg("tablespace with OID %u does not exist", spc_oid)));
|
||||
|
||||
ownerId = ((Form_pg_tablespace) GETSTRUCT(tuple))->spcowner;
|
||||
|
||||
@@ -2034,7 +2036,7 @@ pg_tablespace_ownercheck(Oid spc_oid, AclId userid)
|
||||
if (!HeapTupleIsValid(spctuple))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_UNDEFINED_OBJECT),
|
||||
errmsg("tablespace with OID %u does not exist", spc_oid)));
|
||||
errmsg("tablespace with OID %u does not exist", spc_oid)));
|
||||
|
||||
spcowner = ((Form_pg_tablespace) GETSTRUCT(spctuple))->spcowner;
|
||||
|
||||
@@ -2131,7 +2133,7 @@ pg_conversion_ownercheck(Oid conv_oid, AclId userid)
|
||||
if (!HeapTupleIsValid(tuple))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_UNDEFINED_OBJECT),
|
||||
errmsg("conversion with OID %u does not exist", conv_oid)));
|
||||
errmsg("conversion with OID %u does not exist", conv_oid)));
|
||||
|
||||
owner_id = ((Form_pg_conversion) GETSTRUCT(tuple))->conowner;
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.38 2004/08/29 04:12:27 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.39 2004/08/29 05:06:41 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -970,6 +970,7 @@ find_expr_references_walker(Node *node,
|
||||
if (var->varno <= 0 || var->varno > list_length(rtable))
|
||||
elog(ERROR, "invalid varno %d", var->varno);
|
||||
rte = rt_fetch(var->varno, rtable);
|
||||
|
||||
/*
|
||||
* A whole-row Var references no specific columns, so adds no new
|
||||
* dependency.
|
||||
@@ -995,7 +996,7 @@ find_expr_references_walker(Node *node,
|
||||
var->varattno > list_length(rte->joinaliasvars))
|
||||
elog(ERROR, "invalid varattno %d", var->varattno);
|
||||
find_expr_references_walker((Node *) list_nth(rte->joinaliasvars,
|
||||
var->varattno - 1),
|
||||
var->varattno - 1),
|
||||
context);
|
||||
list_free(context->rtables);
|
||||
context->rtables = save_rtables;
|
||||
@@ -1424,8 +1425,8 @@ getObjectDescription(const ObjectAddress *object)
|
||||
getRelationDescription(&buffer, object->objectId);
|
||||
if (object->objectSubId != 0)
|
||||
appendStringInfo(&buffer, gettext(" column %s"),
|
||||
get_relid_attribute_name(object->objectId,
|
||||
object->objectSubId));
|
||||
get_relid_attribute_name(object->objectId,
|
||||
object->objectSubId));
|
||||
break;
|
||||
|
||||
case OCLASS_PROC:
|
||||
@@ -1624,7 +1625,7 @@ getObjectDescription(const ObjectAddress *object)
|
||||
|
||||
appendStringInfo(&buffer, gettext("operator class %s for %s"),
|
||||
quote_qualified_identifier(nspname,
|
||||
NameStr(opcForm->opcname)),
|
||||
NameStr(opcForm->opcname)),
|
||||
NameStr(amForm->amname));
|
||||
|
||||
ReleaseSysCache(amTup);
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.274 2004/08/29 04:12:27 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.275 2004/08/29 05:06:41 momjian Exp $
|
||||
*
|
||||
*
|
||||
* INTERFACE ROUTINES
|
||||
@@ -265,10 +265,10 @@ heap_create(const char *relname,
|
||||
|
||||
/*
|
||||
* Never allow a pg_class entry to explicitly specify the database's
|
||||
* default tablespace in reltablespace; force it to zero instead.
|
||||
* This ensures that if the database is cloned with a different
|
||||
* default tablespace, the pg_class entry will still match where
|
||||
* CREATE DATABASE will put the physically copied relation.
|
||||
* default tablespace in reltablespace; force it to zero instead. This
|
||||
* ensures that if the database is cloned with a different default
|
||||
* tablespace, the pg_class entry will still match where CREATE
|
||||
* DATABASE will put the physically copied relation.
|
||||
*
|
||||
* Yes, this is a bit of a hack.
|
||||
*/
|
||||
@@ -294,7 +294,8 @@ heap_create(const char *relname,
|
||||
nailme);
|
||||
|
||||
/*
|
||||
* have the storage manager create the relation's disk file, if needed.
|
||||
* have the storage manager create the relation's disk file, if
|
||||
* needed.
|
||||
*/
|
||||
if (create_storage)
|
||||
{
|
||||
@@ -980,12 +981,12 @@ RemoveAttributeById(Oid relid, AttrNumber attnum)
|
||||
|
||||
/*
|
||||
* Set the type OID to invalid. A dropped attribute's type link
|
||||
* cannot be relied on (once the attribute is dropped, the type might
|
||||
* be too). Fortunately we do not need the type row --- the only
|
||||
* really essential information is the type's typlen and typalign,
|
||||
* which are preserved in the attribute's attlen and attalign. We set
|
||||
* atttypid to zero here as a means of catching code that incorrectly
|
||||
* expects it to be valid.
|
||||
* cannot be relied on (once the attribute is dropped, the type
|
||||
* might be too). Fortunately we do not need the type row --- the
|
||||
* only really essential information is the type's typlen and
|
||||
* typalign, which are preserved in the attribute's attlen and
|
||||
* attalign. We set atttypid to zero here as a means of catching
|
||||
* code that incorrectly expects it to be valid.
|
||||
*/
|
||||
attStruct->atttypid = InvalidOid;
|
||||
|
||||
@@ -995,7 +996,10 @@ RemoveAttributeById(Oid relid, AttrNumber attnum)
|
||||
/* We don't want to keep stats for it anymore */
|
||||
attStruct->attstattarget = 0;
|
||||
|
||||
/* Change the column name to something that isn't likely to conflict */
|
||||
/*
|
||||
* Change the column name to something that isn't likely to
|
||||
* conflict
|
||||
*/
|
||||
snprintf(newattname, sizeof(newattname),
|
||||
"........pg.dropped.%d........", attnum);
|
||||
namestrcpy(&(attStruct->attname), newattname);
|
||||
@@ -1199,7 +1203,7 @@ heap_drop_with_catalog(Oid relid)
|
||||
/*
|
||||
* Flush the relation from the relcache. We want to do this before
|
||||
* starting to remove catalog entries, just to be certain that no
|
||||
* relcache entry rebuild will happen partway through. (That should
|
||||
* relcache entry rebuild will happen partway through. (That should
|
||||
* not really matter, since we don't do CommandCounterIncrement here,
|
||||
* but let's be safe.)
|
||||
*/
|
||||
@@ -1584,11 +1588,11 @@ AddRelationRawConstraints(Relation rel,
|
||||
if (pstate->p_hasSubLinks)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("cannot use subquery in check constraint")));
|
||||
errmsg("cannot use subquery in check constraint")));
|
||||
if (pstate->p_hasAggs)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_GROUPING_ERROR),
|
||||
errmsg("cannot use aggregate function in check constraint")));
|
||||
errmsg("cannot use aggregate function in check constraint")));
|
||||
|
||||
/*
|
||||
* Check name uniqueness, or generate a name if none was given.
|
||||
@@ -1614,8 +1618,8 @@ AddRelationRawConstraints(Relation rel,
|
||||
if (strcmp((char *) lfirst(cell2), ccname) == 0)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_DUPLICATE_OBJECT),
|
||||
errmsg("check constraint \"%s\" already exists",
|
||||
ccname)));
|
||||
errmsg("check constraint \"%s\" already exists",
|
||||
ccname)));
|
||||
}
|
||||
}
|
||||
else
|
||||
@@ -1623,18 +1627,18 @@ AddRelationRawConstraints(Relation rel,
|
||||
/*
|
||||
* When generating a name, we want to create "tab_col_check"
|
||||
* for a column constraint and "tab_check" for a table
|
||||
* constraint. We no longer have any info about the
|
||||
* syntactic positioning of the constraint phrase, so we
|
||||
* approximate this by seeing whether the expression references
|
||||
* more than one column. (If the user played by the rules,
|
||||
* the result is the same...)
|
||||
* constraint. We no longer have any info about the syntactic
|
||||
* positioning of the constraint phrase, so we approximate
|
||||
* this by seeing whether the expression references more than
|
||||
* one column. (If the user played by the rules, the result
|
||||
* is the same...)
|
||||
*
|
||||
* Note: pull_var_clause() doesn't descend into sublinks,
|
||||
* but we eliminated those above; and anyway this only needs
|
||||
* to be an approximate answer.
|
||||
* Note: pull_var_clause() doesn't descend into sublinks, but we
|
||||
* eliminated those above; and anyway this only needs to be an
|
||||
* approximate answer.
|
||||
*/
|
||||
List *vars;
|
||||
char *colname;
|
||||
List *vars;
|
||||
char *colname;
|
||||
|
||||
vars = pull_var_clause(expr, false);
|
||||
|
||||
@@ -1763,7 +1767,7 @@ cookDefault(ParseState *pstate,
|
||||
if (contain_var_clause(expr))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
|
||||
errmsg("cannot use column references in default expression")));
|
||||
errmsg("cannot use column references in default expression")));
|
||||
|
||||
/*
|
||||
* It can't return a set either.
|
||||
@@ -1783,7 +1787,7 @@ cookDefault(ParseState *pstate,
|
||||
if (pstate->p_hasAggs)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_GROUPING_ERROR),
|
||||
errmsg("cannot use aggregate function in default expression")));
|
||||
errmsg("cannot use aggregate function in default expression")));
|
||||
|
||||
/*
|
||||
* Coerce the expression to the correct type and typmod, if given.
|
||||
@@ -2047,7 +2051,7 @@ heap_truncate_check_FKs(Relation rel)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Otherwise, must scan pg_constraint. Right now, this is a seqscan
|
||||
* Otherwise, must scan pg_constraint. Right now, this is a seqscan
|
||||
* because there is no available index on confrelid.
|
||||
*/
|
||||
fkeyRel = heap_openr(ConstraintRelationName, AccessShareLock);
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.237 2004/08/29 04:12:27 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.238 2004/08/29 05:06:41 momjian Exp $
|
||||
*
|
||||
*
|
||||
* INTERFACE ROUTINES
|
||||
@@ -511,9 +511,10 @@ index_create(Oid heapRelationId,
|
||||
* We cannot allow indexing a shared relation after initdb (because
|
||||
* there's no way to make the entry in other databases' pg_class).
|
||||
* Unfortunately we can't distinguish initdb from a manually started
|
||||
* standalone backend (toasting of shared rels happens after the bootstrap
|
||||
* phase, so checking IsBootstrapProcessingMode() won't work). However,
|
||||
* we can at least prevent this mistake under normal multi-user operation.
|
||||
* standalone backend (toasting of shared rels happens after the
|
||||
* bootstrap phase, so checking IsBootstrapProcessingMode() won't
|
||||
* work). However, we can at least prevent this mistake under normal
|
||||
* multi-user operation.
|
||||
*/
|
||||
if (shared_relation && IsUnderPostmaster)
|
||||
ereport(ERROR,
|
||||
@@ -800,8 +801,8 @@ index_drop(Oid indexId)
|
||||
|
||||
/*
|
||||
* Close and flush the index's relcache entry, to ensure relcache
|
||||
* doesn't try to rebuild it while we're deleting catalog entries.
|
||||
* We keep the lock though.
|
||||
* doesn't try to rebuild it while we're deleting catalog entries. We
|
||||
* keep the lock though.
|
||||
*/
|
||||
index_close(userIndexRelation);
|
||||
|
||||
@@ -826,8 +827,8 @@ index_drop(Oid indexId)
|
||||
heap_close(indexRelation, RowExclusiveLock);
|
||||
|
||||
/*
|
||||
* if it has any expression columns, we might have stored
|
||||
* statistics about them.
|
||||
* if it has any expression columns, we might have stored statistics
|
||||
* about them.
|
||||
*/
|
||||
if (hasexprs)
|
||||
RemoveStatistics(indexId, 0);
|
||||
@@ -1008,7 +1009,7 @@ setRelhasindex(Oid relid, bool hasindex, bool isprimary, Oid reltoastidxid)
|
||||
|
||||
/*
|
||||
* Find the tuple to update in pg_class. In bootstrap mode we can't
|
||||
* use heap_update, so cheat and overwrite the tuple in-place. In
|
||||
* use heap_update, so cheat and overwrite the tuple in-place. In
|
||||
* normal processing, make a copy to scribble on.
|
||||
*/
|
||||
pg_class = heap_openr(RelationRelationName, RowExclusiveLock);
|
||||
@@ -1122,13 +1123,13 @@ setNewRelfilenode(Relation relation)
|
||||
newrelfilenode = newoid();
|
||||
|
||||
/*
|
||||
* Find the pg_class tuple for the given relation. This is not used
|
||||
* Find the pg_class tuple for the given relation. This is not used
|
||||
* during bootstrap, so okay to use heap_update always.
|
||||
*/
|
||||
pg_class = heap_openr(RelationRelationName, RowExclusiveLock);
|
||||
|
||||
tuple = SearchSysCacheCopy(RELOID,
|
||||
ObjectIdGetDatum(RelationGetRelid(relation)),
|
||||
ObjectIdGetDatum(RelationGetRelid(relation)),
|
||||
0, 0, 0);
|
||||
if (!HeapTupleIsValid(tuple))
|
||||
elog(ERROR, "could not find tuple for relation %u",
|
||||
@@ -1206,15 +1207,15 @@ UpdateStats(Oid relid, double reltuples)
|
||||
|
||||
/*
|
||||
* Find the tuple to update in pg_class. Normally we make a copy of
|
||||
* the tuple using the syscache, modify it, and apply heap_update.
|
||||
* But in bootstrap mode we can't use heap_update, so we cheat and
|
||||
* the tuple using the syscache, modify it, and apply heap_update. But
|
||||
* in bootstrap mode we can't use heap_update, so we cheat and
|
||||
* overwrite the tuple in-place.
|
||||
*
|
||||
* We also must cheat if reindexing pg_class itself, because the
|
||||
* target index may presently not be part of the set of indexes that
|
||||
* We also must cheat if reindexing pg_class itself, because the target
|
||||
* index may presently not be part of the set of indexes that
|
||||
* CatalogUpdateIndexes would update (see reindex_relation). In this
|
||||
* case the stats updates will not be WAL-logged and so could be lost
|
||||
* in a crash. This seems OK considering VACUUM does the same thing.
|
||||
* in a crash. This seems OK considering VACUUM does the same thing.
|
||||
*/
|
||||
pg_class = heap_openr(RelationRelationName, RowExclusiveLock);
|
||||
|
||||
@@ -1454,7 +1455,7 @@ IndexBuildHeapScan(Relation heapRelation,
|
||||
scan = heap_beginscan(heapRelation, /* relation */
|
||||
snapshot, /* seeself */
|
||||
0, /* number of keys */
|
||||
NULL); /* scan key */
|
||||
NULL); /* scan key */
|
||||
|
||||
reltuples = 0;
|
||||
|
||||
@@ -1513,7 +1514,7 @@ IndexBuildHeapScan(Relation heapRelation,
|
||||
* system catalogs before committing.
|
||||
*/
|
||||
if (!TransactionIdIsCurrentTransactionId(
|
||||
HeapTupleHeaderGetXmin(heapTuple->t_data))
|
||||
HeapTupleHeaderGetXmin(heapTuple->t_data))
|
||||
&& !IsSystemRelation(heapRelation))
|
||||
elog(ERROR, "concurrent insert in progress");
|
||||
indexIt = true;
|
||||
@@ -1531,7 +1532,7 @@ IndexBuildHeapScan(Relation heapRelation,
|
||||
* system catalogs before committing.
|
||||
*/
|
||||
if (!TransactionIdIsCurrentTransactionId(
|
||||
HeapTupleHeaderGetXmax(heapTuple->t_data))
|
||||
HeapTupleHeaderGetXmax(heapTuple->t_data))
|
||||
&& !IsSystemRelation(heapRelation))
|
||||
elog(ERROR, "concurrent delete in progress");
|
||||
indexIt = true;
|
||||
@@ -1659,11 +1660,11 @@ reindex_index(Oid indexId)
|
||||
* Note: for REINDEX INDEX, doing this before opening the parent heap
|
||||
* relation means there's a possibility for deadlock failure against
|
||||
* another xact that is doing normal accesses to the heap and index.
|
||||
* However, it's not real clear why you'd be wanting to do REINDEX INDEX
|
||||
* on a table that's in active use, so I'd rather have the protection of
|
||||
* making sure the index is locked down. In the REINDEX TABLE and
|
||||
* REINDEX DATABASE cases, there is no problem because caller already
|
||||
* holds exclusive lock on the parent table.
|
||||
* However, it's not real clear why you'd be wanting to do REINDEX
|
||||
* INDEX on a table that's in active use, so I'd rather have the
|
||||
* protection of making sure the index is locked down. In the REINDEX
|
||||
* TABLE and REINDEX DATABASE cases, there is no problem because
|
||||
* caller already holds exclusive lock on the parent table.
|
||||
*/
|
||||
iRel = index_open(indexId);
|
||||
LockRelation(iRel, AccessExclusiveLock);
|
||||
@@ -1680,8 +1681,8 @@ reindex_index(Oid indexId)
|
||||
* we can do it the normal transaction-safe way.
|
||||
*
|
||||
* Since inplace processing isn't crash-safe, we only allow it in a
|
||||
* standalone backend. (In the REINDEX TABLE and REINDEX DATABASE cases,
|
||||
* the caller should have detected this.)
|
||||
* standalone backend. (In the REINDEX TABLE and REINDEX DATABASE
|
||||
* cases, the caller should have detected this.)
|
||||
*/
|
||||
inplace = iRel->rd_rel->relisshared;
|
||||
|
||||
@@ -1705,7 +1706,8 @@ reindex_index(Oid indexId)
|
||||
{
|
||||
/*
|
||||
* Release any buffers associated with this index. If they're
|
||||
* dirty, they're just dropped without bothering to flush to disk.
|
||||
* dirty, they're just dropped without bothering to flush to
|
||||
* disk.
|
||||
*/
|
||||
DropRelationBuffers(iRel);
|
||||
|
||||
@@ -1724,8 +1726,8 @@ reindex_index(Oid indexId)
|
||||
index_build(heapRelation, iRel, indexInfo);
|
||||
|
||||
/*
|
||||
* index_build will close both the heap and index relations (but not
|
||||
* give up the locks we hold on them). So we're done.
|
||||
* index_build will close both the heap and index relations (but
|
||||
* not give up the locks we hold on them). So we're done.
|
||||
*/
|
||||
}
|
||||
PG_CATCH();
|
||||
@@ -1774,13 +1776,13 @@ reindex_relation(Oid relid, bool toast_too)
|
||||
|
||||
/*
|
||||
* reindex_index will attempt to update the pg_class rows for the
|
||||
* relation and index. If we are processing pg_class itself, we
|
||||
* want to make sure that the updates do not try to insert index
|
||||
* entries into indexes we have not processed yet. (When we are
|
||||
* trying to recover from corrupted indexes, that could easily
|
||||
* cause a crash.) We can accomplish this because CatalogUpdateIndexes
|
||||
* will use the relcache's index list to know which indexes to update.
|
||||
* We just force the index list to be only the stuff we've processed.
|
||||
* relation and index. If we are processing pg_class itself, we want
|
||||
* to make sure that the updates do not try to insert index entries
|
||||
* into indexes we have not processed yet. (When we are trying to
|
||||
* recover from corrupted indexes, that could easily cause a crash.)
|
||||
* We can accomplish this because CatalogUpdateIndexes will use the
|
||||
* relcache's index list to know which indexes to update. We just
|
||||
* force the index list to be only the stuff we've processed.
|
||||
*
|
||||
* It is okay to not insert entries into the indexes we have not
|
||||
* processed yet because all of this is transaction-safe. If we fail
|
||||
@@ -1795,7 +1797,7 @@ reindex_relation(Oid relid, bool toast_too)
|
||||
/* Reindex all the indexes. */
|
||||
foreach(indexId, indexIds)
|
||||
{
|
||||
Oid indexOid = lfirst_oid(indexId);
|
||||
Oid indexOid = lfirst_oid(indexId);
|
||||
|
||||
if (is_pg_class)
|
||||
RelationSetIndexList(rel, doneIndexes);
|
||||
@@ -1819,8 +1821,8 @@ reindex_relation(Oid relid, bool toast_too)
|
||||
result = (indexIds != NIL);
|
||||
|
||||
/*
|
||||
* If the relation has a secondary toast rel, reindex that too while we
|
||||
* still hold the lock on the master table.
|
||||
* If the relation has a secondary toast rel, reindex that too while
|
||||
* we still hold the lock on the master table.
|
||||
*/
|
||||
if (toast_too && OidIsValid(toast_relid))
|
||||
result |= reindex_relation(toast_relid, false);
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.69 2004/08/29 04:12:28 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.70 2004/08/29 05:06:41 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -170,9 +170,9 @@ RangeVarGetRelid(const RangeVar *relation, bool failOK)
|
||||
if (strcmp(relation->catalogname, get_database_name(MyDatabaseId)) != 0)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
|
||||
relation->catalogname, relation->schemaname,
|
||||
relation->relname)));
|
||||
errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
|
||||
relation->catalogname, relation->schemaname,
|
||||
relation->relname)));
|
||||
}
|
||||
|
||||
if (relation->schemaname)
|
||||
@@ -225,9 +225,9 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation)
|
||||
if (strcmp(newRelation->catalogname, get_database_name(MyDatabaseId)) != 0)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
|
||||
newRelation->catalogname, newRelation->schemaname,
|
||||
newRelation->relname)));
|
||||
errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
|
||||
newRelation->catalogname, newRelation->schemaname,
|
||||
newRelation->relname)));
|
||||
}
|
||||
|
||||
if (newRelation->istemp)
|
||||
@@ -236,7 +236,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation)
|
||||
if (newRelation->schemaname)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
|
||||
errmsg("temporary tables may not specify a schema name")));
|
||||
errmsg("temporary tables may not specify a schema name")));
|
||||
/* Initialize temp namespace if first time through */
|
||||
if (!OidIsValid(myTempNamespace))
|
||||
InitTempTableNamespace();
|
||||
@@ -699,12 +699,13 @@ OpernameGetCandidates(List *names, char oprkind)
|
||||
|
||||
/*
|
||||
* In typical scenarios, most if not all of the operators found by the
|
||||
* catcache search will end up getting returned; and there can be quite
|
||||
* a few, for common operator names such as '=' or '+'. To reduce the
|
||||
* time spent in palloc, we allocate the result space as an array large
|
||||
* enough to hold all the operators. The original coding of this routine
|
||||
* did a separate palloc for each operator, but profiling revealed that
|
||||
* the pallocs used an unreasonably large fraction of parsing time.
|
||||
* catcache search will end up getting returned; and there can be
|
||||
* quite a few, for common operator names such as '=' or '+'. To
|
||||
* reduce the time spent in palloc, we allocate the result space as an
|
||||
* array large enough to hold all the operators. The original coding
|
||||
* of this routine did a separate palloc for each operator, but
|
||||
* profiling revealed that the pallocs used an unreasonably large
|
||||
* fraction of parsing time.
|
||||
*/
|
||||
#define SPACE_PER_OP MAXALIGN(sizeof(struct _FuncCandidateList) + sizeof(Oid))
|
||||
|
||||
@@ -1191,8 +1192,8 @@ DeconstructQualifiedName(List *names,
|
||||
if (strcmp(catalogname, get_database_name(MyDatabaseId)) != 0)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("cross-database references are not implemented: %s",
|
||||
NameListToString(names))));
|
||||
errmsg("cross-database references are not implemented: %s",
|
||||
NameListToString(names))));
|
||||
break;
|
||||
default:
|
||||
ereport(ERROR,
|
||||
@@ -1645,10 +1646,11 @@ InitTempTableNamespace(void)
|
||||
* tables. We use a nonstandard error message here since
|
||||
* "databasename: permission denied" might be a tad cryptic.
|
||||
*
|
||||
* Note that ACL_CREATE_TEMP rights are rechecked in pg_namespace_aclmask;
|
||||
* that's necessary since current user ID could change during the session.
|
||||
* But there's no need to make the namespace in the first place until a
|
||||
* temp table creation request is made by someone with appropriate rights.
|
||||
* Note that ACL_CREATE_TEMP rights are rechecked in
|
||||
* pg_namespace_aclmask; that's necessary since current user ID could
|
||||
* change during the session. But there's no need to make the
|
||||
* namespace in the first place until a temp table creation request is
|
||||
* made by someone with appropriate rights.
|
||||
*/
|
||||
if (pg_database_aclcheck(MyDatabaseId, GetUserId(),
|
||||
ACL_CREATE_TEMP) != ACLCHECK_OK)
|
||||
@@ -1847,7 +1849,8 @@ assign_search_path(const char *newval, bool doit, GucSource source)
|
||||
* ALTER DATABASE SET or ALTER USER SET command. It could be that
|
||||
* the intended use of the search path is for some other database,
|
||||
* so we should not error out if it mentions schemas not present
|
||||
* in the current database. We reduce the message to NOTICE instead.
|
||||
* in the current database. We reduce the message to NOTICE
|
||||
* instead.
|
||||
*/
|
||||
foreach(l, namelist)
|
||||
{
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.67 2004/08/29 04:12:28 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.68 2004/08/29 05:06:41 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -78,8 +78,8 @@ AggregateCreate(const char *aggName,
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
|
||||
errmsg("cannot determine transition data type"),
|
||||
errdetail("An aggregate using \"anyarray\" or \"anyelement\" as "
|
||||
"transition type must have one of them as its base type.")));
|
||||
errdetail("An aggregate using \"anyarray\" or \"anyelement\" as "
|
||||
"transition type must have one of them as its base type.")));
|
||||
|
||||
/* handle transfn */
|
||||
MemSet(fnArgs, 0, FUNC_MAX_ARGS * sizeof(Oid));
|
||||
@@ -163,8 +163,8 @@ AggregateCreate(const char *aggName,
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_DATATYPE_MISMATCH),
|
||||
errmsg("cannot determine result data type"),
|
||||
errdetail("An aggregate returning \"anyarray\" or \"anyelement\" "
|
||||
"must have one of them as its base type.")));
|
||||
errdetail("An aggregate returning \"anyarray\" or \"anyelement\" "
|
||||
"must have one of them as its base type.")));
|
||||
|
||||
/*
|
||||
* Everything looks okay. Try to create the pg_proc entry for the
|
||||
@@ -190,8 +190,8 @@ AggregateCreate(const char *aggName,
|
||||
PROVOLATILE_IMMUTABLE, /* volatility (not
|
||||
* needed for agg) */
|
||||
1, /* parameterCount */
|
||||
fnArgs, /* parameterTypes */
|
||||
NULL); /* parameterNames */
|
||||
fnArgs, /* parameterTypes */
|
||||
NULL); /* parameterNames */
|
||||
|
||||
/*
|
||||
* Okay to create the pg_aggregate entry.
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.118 2004/08/29 04:12:29 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.119 2004/08/29 05:06:41 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -44,12 +44,12 @@ Datum fmgr_c_validator(PG_FUNCTION_ARGS);
|
||||
Datum fmgr_sql_validator(PG_FUNCTION_ARGS);
|
||||
|
||||
static Datum create_parameternames_array(int parameterCount,
|
||||
const char *parameterNames[]);
|
||||
const char *parameterNames[]);
|
||||
static void sql_function_parse_error_callback(void *arg);
|
||||
static int match_prosrc_to_query(const char *prosrc, const char *queryText,
|
||||
int cursorpos);
|
||||
static int match_prosrc_to_query(const char *prosrc, const char *queryText,
|
||||
int cursorpos);
|
||||
static bool match_prosrc_to_literal(const char *prosrc, const char *literal,
|
||||
int cursorpos, int *newcursorpos);
|
||||
int cursorpos, int *newcursorpos);
|
||||
|
||||
|
||||
/* ----------------------------------------------------------------
|
||||
@@ -173,7 +173,7 @@ ProcedureCreate(const char *procedureName,
|
||||
values[i++] = UInt16GetDatum(parameterCount); /* pronargs */
|
||||
values[i++] = ObjectIdGetDatum(returnType); /* prorettype */
|
||||
values[i++] = PointerGetDatum(typev); /* proargtypes */
|
||||
values[i++] = namesarray; /* proargnames */
|
||||
values[i++] = namesarray; /* proargnames */
|
||||
if (namesarray == PointerGetDatum(NULL))
|
||||
nulls[Anum_pg_proc_proargnames - 1] = 'n';
|
||||
values[i++] = DirectFunctionCall1(textin, /* prosrc */
|
||||
@@ -329,7 +329,7 @@ create_parameternames_array(int parameterCount, const char *parameterNames[])
|
||||
if (!parameterNames)
|
||||
return PointerGetDatum(NULL);
|
||||
|
||||
for (i=0; i<parameterCount; i++)
|
||||
for (i = 0; i < parameterCount; i++)
|
||||
{
|
||||
const char *s = parameterNames[i];
|
||||
|
||||
@@ -562,8 +562,9 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList)
|
||||
}
|
||||
|
||||
/*
|
||||
* Otherwise assume we are returning the whole tuple. Crosschecking
|
||||
* against what the caller expects will happen at runtime.
|
||||
* Otherwise assume we are returning the whole tuple.
|
||||
* Crosschecking against what the caller expects will happen at
|
||||
* runtime.
|
||||
*/
|
||||
return true;
|
||||
}
|
||||
@@ -652,9 +653,10 @@ fmgr_c_validator(PG_FUNCTION_ARGS)
|
||||
char *probin;
|
||||
|
||||
/*
|
||||
* It'd be most consistent to skip the check if !check_function_bodies,
|
||||
* but the purpose of that switch is to be helpful for pg_dump loading,
|
||||
* and for pg_dump loading it's much better if we *do* check.
|
||||
* It'd be most consistent to skip the check if
|
||||
* !check_function_bodies, but the purpose of that switch is to be
|
||||
* helpful for pg_dump loading, and for pg_dump loading it's much
|
||||
* better if we *do* check.
|
||||
*/
|
||||
|
||||
tuple = SearchSysCache(PROCOID,
|
||||
@@ -760,10 +762,10 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
|
||||
error_context_stack = &sqlerrcontext;
|
||||
|
||||
/*
|
||||
* We can't do full prechecking of the function definition if there
|
||||
* are any polymorphic input types, because actual datatypes of
|
||||
* expression results will be unresolvable. The check will be done
|
||||
* at runtime instead.
|
||||
* We can't do full prechecking of the function definition if
|
||||
* there are any polymorphic input types, because actual datatypes
|
||||
* of expression results will be unresolvable. The check will be
|
||||
* done at runtime instead.
|
||||
*
|
||||
* We can run the text through the raw parser though; this will at
|
||||
* least catch silly syntactic errors.
|
||||
@@ -817,7 +819,7 @@ sql_function_parse_error_callback(void *arg)
|
||||
/*
|
||||
* Adjust a syntax error occurring inside the function body of a CREATE
|
||||
* FUNCTION command. This can be used by any function validator, not only
|
||||
* for SQL-language functions. It is assumed that the syntax error position
|
||||
* for SQL-language functions. It is assumed that the syntax error position
|
||||
* is initially relative to the function body string (as passed in). If
|
||||
* possible, we adjust the position to reference the original CREATE command;
|
||||
* if we can't manage that, we set up an "internal query" syntax error instead.
|
||||
@@ -832,11 +834,11 @@ function_parse_error_transpose(const char *prosrc)
|
||||
const char *queryText;
|
||||
|
||||
/*
|
||||
* Nothing to do unless we are dealing with a syntax error that has
|
||||
* a cursor position.
|
||||
* Nothing to do unless we are dealing with a syntax error that has a
|
||||
* cursor position.
|
||||
*
|
||||
* Some PLs may prefer to report the error position as an internal
|
||||
* error to begin with, so check that too.
|
||||
* Some PLs may prefer to report the error position as an internal error
|
||||
* to begin with, so check that too.
|
||||
*/
|
||||
origerrposition = geterrposition();
|
||||
if (origerrposition <= 0)
|
||||
@@ -891,17 +893,17 @@ match_prosrc_to_query(const char *prosrc, const char *queryText,
|
||||
* (though not in any very probable scenarios), so fail if we find
|
||||
* more than one match.
|
||||
*/
|
||||
int prosrclen = strlen(prosrc);
|
||||
int querylen = strlen(queryText);
|
||||
int matchpos = 0;
|
||||
int curpos;
|
||||
int newcursorpos;
|
||||
int prosrclen = strlen(prosrc);
|
||||
int querylen = strlen(queryText);
|
||||
int matchpos = 0;
|
||||
int curpos;
|
||||
int newcursorpos;
|
||||
|
||||
for (curpos = 0; curpos < querylen-prosrclen; curpos++)
|
||||
for (curpos = 0; curpos < querylen - prosrclen; curpos++)
|
||||
{
|
||||
if (queryText[curpos] == '$' &&
|
||||
strncmp(prosrc, &queryText[curpos+1], prosrclen) == 0 &&
|
||||
queryText[curpos+1+prosrclen] == '$')
|
||||
strncmp(prosrc, &queryText[curpos + 1], prosrclen) == 0 &&
|
||||
queryText[curpos + 1 + prosrclen] == '$')
|
||||
{
|
||||
/*
|
||||
* Found a $foo$ match. Since there are no embedded quoting
|
||||
@@ -910,20 +912,21 @@ match_prosrc_to_query(const char *prosrc, const char *queryText,
|
||||
*/
|
||||
if (matchpos)
|
||||
return 0; /* multiple matches, fail */
|
||||
matchpos = pg_mbstrlen_with_len(queryText, curpos+1)
|
||||
matchpos = pg_mbstrlen_with_len(queryText, curpos + 1)
|
||||
+ cursorpos;
|
||||
}
|
||||
else if (queryText[curpos] == '\'' &&
|
||||
match_prosrc_to_literal(prosrc, &queryText[curpos+1],
|
||||
match_prosrc_to_literal(prosrc, &queryText[curpos + 1],
|
||||
cursorpos, &newcursorpos))
|
||||
{
|
||||
/*
|
||||
* Found a 'foo' match. match_prosrc_to_literal() has adjusted
|
||||
* for any quotes or backslashes embedded in the literal.
|
||||
* Found a 'foo' match. match_prosrc_to_literal() has
|
||||
* adjusted for any quotes or backslashes embedded in the
|
||||
* literal.
|
||||
*/
|
||||
if (matchpos)
|
||||
return 0; /* multiple matches, fail */
|
||||
matchpos = pg_mbstrlen_with_len(queryText, curpos+1)
|
||||
matchpos = pg_mbstrlen_with_len(queryText, curpos + 1)
|
||||
+ newcursorpos;
|
||||
}
|
||||
}
|
||||
@@ -948,15 +951,16 @@ match_prosrc_to_literal(const char *prosrc, const char *literal,
|
||||
|
||||
/*
|
||||
* This implementation handles backslashes and doubled quotes in the
|
||||
* string literal. It does not handle the SQL syntax for literals
|
||||
* string literal. It does not handle the SQL syntax for literals
|
||||
* continued across line boundaries.
|
||||
*
|
||||
* We do the comparison a character at a time, not a byte at a time,
|
||||
* so that we can do the correct cursorpos math.
|
||||
* We do the comparison a character at a time, not a byte at a time, so
|
||||
* that we can do the correct cursorpos math.
|
||||
*/
|
||||
while (*prosrc)
|
||||
{
|
||||
cursorpos--; /* characters left before cursor */
|
||||
|
||||
/*
|
||||
* Check for backslashes and doubled quotes in the literal; adjust
|
||||
* newcp when one is found before the cursor.
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.95 2004/08/29 04:12:29 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.96 2004/08/29 05:06:41 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -201,8 +201,8 @@ TypeCreate(const char *typeName,
|
||||
(internalSize <= 0 || internalSize > (int16) sizeof(Datum)))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
|
||||
errmsg("internal size %d is invalid for passed-by-value type",
|
||||
internalSize)));
|
||||
errmsg("internal size %d is invalid for passed-by-value type",
|
||||
internalSize)));
|
||||
|
||||
/* Only varlena types can be toasted */
|
||||
if (storage != 'p' && internalSize != -1)
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.20 2004/08/29 04:12:29 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.21 2004/08/29 05:06:41 momjian Exp $
|
||||
*
|
||||
* DESCRIPTION
|
||||
* The "DefineFoo" routines take the parse tree and pick out the
|
||||
@@ -223,9 +223,9 @@ RenameAggregate(List *name, TypeName *basetype, const char *newname)
|
||||
|
||||
/*
|
||||
* if a basetype is passed in, then attempt to find an aggregate for
|
||||
* that specific type; else attempt to find an aggregate with a basetype
|
||||
* of ANYOID. This means that the aggregate applies to all basetypes
|
||||
* (eg, COUNT).
|
||||
* that specific type; else attempt to find an aggregate with a
|
||||
* basetype of ANYOID. This means that the aggregate applies to all
|
||||
* basetypes (eg, COUNT).
|
||||
*/
|
||||
if (basetype)
|
||||
basetypeOid = typenameTypeId(basetype);
|
||||
@@ -302,9 +302,9 @@ AlterAggregateOwner(List *name, TypeName *basetype, AclId newOwnerSysId)
|
||||
|
||||
/*
|
||||
* if a basetype is passed in, then attempt to find an aggregate for
|
||||
* that specific type; else attempt to find an aggregate with a basetype
|
||||
* of ANYOID. This means that the aggregate applies to all basetypes
|
||||
* (eg, COUNT).
|
||||
* that specific type; else attempt to find an aggregate with a
|
||||
* basetype of ANYOID. This means that the aggregate applies to all
|
||||
* basetypes (eg, COUNT).
|
||||
*/
|
||||
if (basetype)
|
||||
basetypeOid = typenameTypeId(basetype);
|
||||
@@ -322,7 +322,7 @@ AlterAggregateOwner(List *name, TypeName *basetype, AclId newOwnerSysId)
|
||||
elog(ERROR, "cache lookup failed for function %u", procOid);
|
||||
procForm = (Form_pg_proc) GETSTRUCT(tup);
|
||||
|
||||
/*
|
||||
/*
|
||||
* If the new owner is the same as the existing owner, consider the
|
||||
* command to have succeeded. This is for dump restoration purposes.
|
||||
*/
|
||||
@@ -334,7 +334,10 @@ AlterAggregateOwner(List *name, TypeName *basetype, AclId newOwnerSysId)
|
||||
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
|
||||
errmsg("must be superuser to change owner")));
|
||||
|
||||
/* Modify the owner --- okay to scribble on tup because it's a copy */
|
||||
/*
|
||||
* Modify the owner --- okay to scribble on tup because it's a
|
||||
* copy
|
||||
*/
|
||||
procForm->proowner = newOwnerSysId;
|
||||
|
||||
simple_heap_update(rel, &tup->t_self, tup);
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/commands/alter.c,v 1.10 2004/08/29 04:12:29 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/commands/alter.c,v 1.11 2004/08/29 05:06:41 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -38,7 +38,7 @@
|
||||
|
||||
|
||||
/*
|
||||
* Executes an ALTER OBJECT / RENAME TO statement. Based on the object
|
||||
* Executes an ALTER OBJECT / RENAME TO statement. Based on the object
|
||||
* type, the function appropriate to that type is executed.
|
||||
*/
|
||||
void
|
||||
@@ -153,7 +153,7 @@ ExecRenameStmt(RenameStmt *stmt)
|
||||
void
|
||||
ExecAlterOwnerStmt(AlterOwnerStmt *stmt)
|
||||
{
|
||||
AclId newowner = get_usesysid(stmt->newowner);
|
||||
AclId newowner = get_usesysid(stmt->newowner);
|
||||
|
||||
switch (stmt->objectType)
|
||||
{
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.75 2004/08/29 04:12:29 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.76 2004/08/29 05:06:41 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -42,9 +42,9 @@
|
||||
/* Data structure for Algorithm S from Knuth 3.4.2 */
|
||||
typedef struct
|
||||
{
|
||||
BlockNumber N; /* number of blocks, known in advance */
|
||||
BlockNumber N; /* number of blocks, known in advance */
|
||||
int n; /* desired sample size */
|
||||
BlockNumber t; /* current block number */
|
||||
BlockNumber t; /* current block number */
|
||||
int m; /* blocks selected so far */
|
||||
} BlockSamplerData;
|
||||
typedef BlockSamplerData *BlockSampler;
|
||||
@@ -68,13 +68,13 @@ static MemoryContext anl_context = NULL;


static void BlockSampler_Init(BlockSampler bs, BlockNumber nblocks,
int samplesize);
int samplesize);
static bool BlockSampler_HasMore(BlockSampler bs);
static BlockNumber BlockSampler_Next(BlockSampler bs);
static void compute_index_stats(Relation onerel, double totalrows,
AnlIndexData *indexdata, int nindexes,
HeapTuple *rows, int numrows,
MemoryContext col_context);
AnlIndexData *indexdata, int nindexes,
HeapTuple *rows, int numrows,
MemoryContext col_context);
static VacAttrStats *examine_attribute(Relation onerel, int attnum);
static int acquire_sample_rows(Relation onerel, HeapTuple *rows,
int targrows, double *totalrows);
@@ -157,9 +157,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
}

/*
* Check that it's a plain table; we used to do this in
* get_rel_oids() but seems safer to check after we've locked the
* relation.
* Check that it's a plain table; we used to do this in get_rel_oids()
* but seems safer to check after we've locked the relation.
*/
if (onerel->rd_rel->relkind != RELKIND_RELATION)
{
@@ -239,9 +238,10 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
}

/*
* Open all indexes of the relation, and see if there are any analyzable
* columns in the indexes. We do not analyze index columns if there was
* an explicit column list in the ANALYZE command, however.
* Open all indexes of the relation, and see if there are any
* analyzable columns in the indexes. We do not analyze index columns
* if there was an explicit column list in the ANALYZE command,
* however.
*/
vac_open_indexes(onerel, &nindexes, &Irel);
hasindex = (nindexes > 0);
@@ -253,10 +253,10 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
for (ind = 0; ind < nindexes; ind++)
{
AnlIndexData *thisdata = &indexdata[ind];
IndexInfo *indexInfo;
IndexInfo *indexInfo;

thisdata->indexInfo = indexInfo = BuildIndexInfo(Irel[ind]);
thisdata->tupleFract = 1.0; /* fix later if partial */
thisdata->tupleFract = 1.0; /* fix later if partial */
if (indexInfo->ii_Expressions != NIL && vacstmt->va_cols == NIL)
{
ListCell *indexpr_item = list_head(indexInfo->ii_Expressions);
@@ -273,25 +273,26 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
/* Found an index expression */
Node *indexkey;

if (indexpr_item == NULL) /* shouldn't happen */
if (indexpr_item == NULL) /* shouldn't happen */
elog(ERROR, "too few entries in indexprs list");
indexkey = (Node *) lfirst(indexpr_item);
indexpr_item = lnext(indexpr_item);

/*
* Can't analyze if the opclass uses a storage type
* different from the expression result type. We'd
* get confused because the type shown in pg_attribute
* for the index column doesn't match what we are
* getting from the expression. Perhaps this can be
* fixed someday, but for now, punt.
* Can't analyze if the opclass uses a storage
* type different from the expression result type.
* We'd get confused because the type shown in
* pg_attribute for the index column doesn't match
* what we are getting from the expression.
* Perhaps this can be fixed someday, but for now,
* punt.
*/
if (exprType(indexkey) !=
Irel[ind]->rd_att->attrs[i]->atttypid)
continue;

thisdata->vacattrstats[tcnt] =
examine_attribute(Irel[ind], i+1);
examine_attribute(Irel[ind], i + 1);
if (thisdata->vacattrstats[tcnt] != NULL)
{
tcnt++;
@@ -401,10 +402,10 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)

/*
* If we are running a standalone ANALYZE, update pages/tuples stats
* in pg_class. We know the accurate page count from the smgr,
* but only an approximate number of tuples; therefore, if we are part
* of VACUUM ANALYZE do *not* overwrite the accurate count already
* inserted by VACUUM. The same consideration applies to indexes.
* in pg_class. We know the accurate page count from the smgr, but
* only an approximate number of tuples; therefore, if we are part of
* VACUUM ANALYZE do *not* overwrite the accurate count already
* inserted by VACUUM. The same consideration applies to indexes.
*/
if (!vacstmt->vacuum)
{
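As a concrete illustration of the branch above, a sketch of the conditional stats update; this assumes the 8.0-era vac_update_relstats(relid, num_pages, num_tuples, hasindex) signature and elides the surrounding declarations:

/* Sketch (assumed API): touch pg_class only for standalone ANALYZE */
if (!vacstmt->vacuum)
{
	double		totalindexrows;

	vac_update_relstats(RelationGetRelid(onerel),
						RelationGetNumberOfBlocks(onerel),
						totalrows,
						hasindex);
	for (ind = 0; ind < nindexes; ind++)
	{
		AnlIndexData *thisdata = &indexdata[ind];

		totalindexrows = ceil(thisdata->tupleFract * totalrows);
		vac_update_relstats(RelationGetRelid(Irel[ind]),
							RelationGetNumberOfBlocks(Irel[ind]),
							totalindexrows,
							false);
	}
}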
@@ -446,7 +447,7 @@ compute_index_stats(Relation onerel, double totalrows,
MemoryContext col_context)
{
MemoryContext ind_context,
old_context;
old_context;
TupleDesc heapDescriptor;
Datum attdata[INDEX_MAX_KEYS];
char nulls[INDEX_MAX_KEYS];
@@ -465,7 +466,7 @@ compute_index_stats(Relation onerel, double totalrows,
for (ind = 0; ind < nindexes; ind++)
{
AnlIndexData *thisdata = &indexdata[ind];
IndexInfo *indexInfo = thisdata->indexInfo;
IndexInfo *indexInfo = thisdata->indexInfo;
int attr_cnt = thisdata->attr_cnt;
TupleTable tupleTable;
TupleTableSlot *slot;
@@ -526,8 +527,9 @@ compute_index_stats(Relation onerel, double totalrows,
if (attr_cnt > 0)
{
/*
* Evaluate the index row to compute expression values.
* We could do this by hand, but FormIndexDatum is convenient.
* Evaluate the index row to compute expression values. We
* could do this by hand, but FormIndexDatum is
* convenient.
*/
FormIndexDatum(indexInfo,
heapTuple,
@@ -535,16 +537,17 @@ compute_index_stats(Relation onerel, double totalrows,
estate,
attdata,
nulls);

/*
* Save just the columns we care about.
*/
for (i = 0; i < attr_cnt; i++)
{
VacAttrStats *stats = thisdata->vacattrstats[i];
int attnum = stats->attr->attnum;
int attnum = stats->attr->attnum;

exprvals[tcnt] = attdata[attnum-1];
exprnulls[tcnt] = (nulls[attnum-1] == 'n');
exprvals[tcnt] = attdata[attnum - 1];
exprnulls[tcnt] = (nulls[attnum - 1] == 'n');
tcnt++;
}
}
@@ -552,7 +555,8 @@ compute_index_stats(Relation onerel, double totalrows,

/*
* Having counted the number of rows that pass the predicate in
* the sample, we can estimate the total number of rows in the index.
* the sample, we can estimate the total number of rows in the
* index.
*/
thisdata->tupleFract = (double) numindexrows / (double) numrows;
totalindexrows = ceil(thisdata->tupleFract * totalrows);
@@ -630,7 +634,7 @@ examine_attribute(Relation onerel, int attnum)
stats->tupattnum = attnum;

/*
* Call the type-specific typanalyze function. If none is specified,
* Call the type-specific typanalyze function. If none is specified,
* use std_typanalyze().
*/
if (OidIsValid(stats->attrtype->typanalyze))
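The dispatch the comment describes is a one-liner either way; a sketch, assuming typanalyze is an optional function OID on the type and std_typanalyze() is the built-in fallback:

/* Sketch of the typanalyze dispatch described above */
if (OidIsValid(stats->attrtype->typanalyze))
	ok = DatumGetBool(OidFunctionCall1(stats->attrtype->typanalyze,
									   PointerGetDatum(stats)));
else
	ok = std_typanalyze(stats);		/* default for types with no hook */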
@@ -667,10 +671,10 @@ static void
BlockSampler_Init(BlockSampler bs, BlockNumber nblocks, int samplesize)
{
bs->N = nblocks; /* measured table size */

/*
* If we decide to reduce samplesize for tables that have less or
* not much more than samplesize blocks, here is the place to do
* it.
* If we decide to reduce samplesize for tables that have less or not
* much more than samplesize blocks, here is the place to do it.
*/
bs->n = samplesize;
bs->t = 0; /* blocks scanned so far */
@@ -686,10 +690,10 @@ BlockSampler_HasMore(BlockSampler bs)
static BlockNumber
BlockSampler_Next(BlockSampler bs)
{
BlockNumber K = bs->N - bs->t; /* remaining blocks */
BlockNumber K = bs->N - bs->t; /* remaining blocks */
int k = bs->n - bs->m; /* blocks still to sample */
double p; /* probability to skip block */
double V; /* random */
double p; /* probability to skip block */
double V; /* random */

Assert(BlockSampler_HasMore(bs)); /* hence K > 0 and k > 0 */

@@ -706,7 +710,7 @@ BlockSampler_Next(BlockSampler bs)
* If we are to skip, we should advance t (hence decrease K), and
* repeat the same probabilistic test for the next block. The naive
* implementation thus requires a random_fract() call for each block
* number. But we can reduce this to one random_fract() call per
* number. But we can reduce this to one random_fract() call per
* selected block, by noting that each time the while-test succeeds,
* we can reinterpret V as a uniform random number in the range 0 to p.
* Therefore, instead of choosing a new V, we just adjust p to be
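Spelled out, the one-random-number-per-selection trick looks like this; a sketch of the skip loop, assuming random_fract() returns a uniform value in [0, 1) and using the K, k, p, V locals declared above:

/*
 * Sketch, not verbatim backend code: draw V once, then fold each
 * successive block's skip probability into p instead of drawing a
 * new random number per block.
 */
V = random_fract();						/* uniform in [0, 1) */
p = 1.0 - (double) k / (double) K;		/* chance of skipping this block */
while (V < p)
{
	bs->t++;							/* skip the current block */
	K--;								/* keep K == N - t */
	p *= 1.0 - (double) k / (double) K; /* cumulative skip probability */
}
bs->m++;								/* select block number bs->t */
return bs->t++;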
@@ -770,11 +774,11 @@ static int
acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
double *totalrows)
{
int numrows = 0; /* # rows collected */
double liverows = 0; /* # rows seen */
int numrows = 0; /* # rows collected */
double liverows = 0; /* # rows seen */
double deadrows = 0;
double rowstoskip = -1; /* -1 means not set yet */
BlockNumber totalblocks;
double rowstoskip = -1; /* -1 means not set yet */
BlockNumber totalblocks;
BlockSamplerData bs;
double rstate;

@@ -826,14 +830,13 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
{
/*
* The first targrows live rows are simply copied into the
* reservoir.
* Then we start replacing tuples in the sample until
* we reach the end of the relation. This algorithm is
* from Jeff Vitter's paper (see full citation below).
* reservoir. Then we start replacing tuples in the sample
* until we reach the end of the relation. This algorithm
* is from Jeff Vitter's paper (see full citation below).
* It works by repeatedly computing the number of tuples
* to skip before selecting a tuple, which replaces a
* randomly chosen element of the reservoir (current
* set of tuples). At all times the reservoir is a true
* randomly chosen element of the reservoir (current set
* of tuples). At all times the reservoir is a true
* random sample of the tuples we've passed over so far,
* so when we fall off the end of the relation we're done.
*/
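The comment above compresses the whole algorithm; a skeleton of the two phases, illustrative rather than verbatim, with the heap-scan bookkeeping elided (get_next_S() is the Vitter skip-distance generator used below):

/* Reservoir-sampling skeleton for one live row (sketch only) */
if (numrows < targrows)
	rows[numrows++] = heap_copytuple(&targtuple);	/* filling phase */
else
{
	if (rowstoskip < 0)
		rowstoskip = get_next_S(liverows, targrows, &rstate);
	if (rowstoskip <= 0)
	{
		/* replace one reservoir slot at random; sample stays uniform */
		int			k = (int) (targrows * random_fract());

		heap_freetuple(rows[k]);
		rows[k] = heap_copytuple(&targtuple);
	}
	rowstoskip -= 1;
}
liverows += 1;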
@@ -842,10 +845,10 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
else
{
/*
* t in Vitter's paper is the number of records already
* processed. If we need to compute a new S value, we
* must use the not-yet-incremented value of liverows
* as t.
* t in Vitter's paper is the number of records
* already processed. If we need to compute a new S
* value, we must use the not-yet-incremented value of
* liverows as t.
*/
if (rowstoskip < 0)
rowstoskip = get_next_S(liverows, targrows, &rstate);
@@ -853,10 +856,10 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
if (rowstoskip <= 0)
{
/*
* Found a suitable tuple, so save it,
* replacing one old tuple at random
* Found a suitable tuple, so save it, replacing
* one old tuple at random
*/
int k = (int) (targrows * random_fract());
int k = (int) (targrows * random_fract());

Assert(k >= 0 && k < targrows);
heap_freetuple(rows[k]);
@@ -874,9 +877,9 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
else
{
/*
* Count dead rows, but not empty slots. This information is
* currently not used, but it seems likely we'll want it
* someday.
* Count dead rows, but not empty slots. This information
* is currently not used, but it seems likely we'll want
* it someday.
*/
if (targtuple.t_data != NULL)
deadrows += 1;
@@ -888,12 +891,12 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
}

/*
* If we didn't find as many tuples as we wanted then we're done.
* No sort is needed, since they're already in order.
* If we didn't find as many tuples as we wanted then we're done. No
* sort is needed, since they're already in order.
*
* Otherwise we need to sort the collected tuples by position
* (itempointer). It's not worth worrying about corner cases
* where the tuples are already sorted.
* (itempointer). It's not worth worrying about corner cases where
* the tuples are already sorted.
*/
if (numrows == targrows)
qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);
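compare_rows is the qsort callback named above; a minimal sketch of such a physical-position comparator, assuming the standard ItemPointer accessor macros on each tuple's t_self:

/* Sketch: order sample tuples by (block, offset) of their TIDs */
static int
compare_rows(const void *a, const void *b)
{
	HeapTuple	ha = *(HeapTuple *) a;
	HeapTuple	hb = *(HeapTuple *) b;
	BlockNumber ba = ItemPointerGetBlockNumber(&ha->t_self);
	OffsetNumber oa = ItemPointerGetOffsetNumber(&ha->t_self);
	BlockNumber bb = ItemPointerGetBlockNumber(&hb->t_self);
	OffsetNumber ob = ItemPointerGetOffsetNumber(&hb->t_self);

	if (ba < bb)
		return -1;
	if (ba > bb)
		return 1;
	if (oa < ob)
		return -1;
	if (oa > ob)
		return 1;
	return 0;
}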
@@ -907,7 +910,7 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
*totalrows = 0.0;

/*
* Emit some interesting relation info
* Emit some interesting relation info
*/
ereport(elevel,
(errmsg("\"%s\": scanned %d of %u pages, "
@@ -1128,10 +1131,10 @@ update_attstats(Oid relid, int natts, VacAttrStats **vacattrstats)

i = 0;
values[i++] = ObjectIdGetDatum(relid); /* starelid */
values[i++] = Int16GetDatum(stats->attr->attnum); /* staattnum */
values[i++] = Float4GetDatum(stats->stanullfrac); /* stanullfrac */
values[i++] = Int16GetDatum(stats->attr->attnum); /* staattnum */
values[i++] = Float4GetDatum(stats->stanullfrac); /* stanullfrac */
values[i++] = Int32GetDatum(stats->stawidth); /* stawidth */
values[i++] = Float4GetDatum(stats->stadistinct); /* stadistinct */
values[i++] = Float4GetDatum(stats->stadistinct); /* stadistinct */
for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
{
values[i++] = Int16GetDatum(stats->stakind[k]); /* stakindN */
@@ -1305,13 +1308,13 @@ static int *datumCmpTupnoLink;


static void compute_minimal_stats(VacAttrStatsP stats,
AnalyzeAttrFetchFunc fetchfunc,
int samplerows,
double totalrows);
AnalyzeAttrFetchFunc fetchfunc,
int samplerows,
double totalrows);
static void compute_scalar_stats(VacAttrStatsP stats,
AnalyzeAttrFetchFunc fetchfunc,
int samplerows,
double totalrows);
AnalyzeAttrFetchFunc fetchfunc,
int samplerows,
double totalrows);
static int compare_scalars(const void *a, const void *b);
static int compare_mcvs(const void *a, const void *b);


@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.114 2004/08/29 04:12:29 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.115 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -106,7 +106,8 @@
*/
static List *pendingNotifies = NIL;

static List *upperPendingNotifies = NIL; /* list of upper-xact lists */
static List *upperPendingNotifies = NIL; /* list of upper-xact
* lists */

/*
* State for inbound notifies consists of two flags: one saying whether
@@ -524,25 +525,27 @@ AtCommit_Notify(void)

rTuple = heap_modifytuple(lTuple, lRel,
value, nulls, repl);

/*
* We cannot use simple_heap_update here because the tuple
* could have been modified by an uncommitted transaction;
* specifically, since UNLISTEN releases exclusive lock on
* the table before commit, the other guy could already have
* tried to unlisten. There are no other cases where we
* should be able to see an uncommitted update or delete.
* Therefore, our response to a HeapTupleBeingUpdated result
* is just to ignore it. We do *not* wait for the other
* guy to commit --- that would risk deadlock, and we don't
* want to block while holding the table lock anyway for
* performance reasons. We also ignore HeapTupleUpdated,
* which could occur if the other guy commits between our
* heap_getnext and heap_update calls.
* the table before commit, the other guy could already
* have tried to unlisten. There are no other cases where
* we should be able to see an uncommitted update or
* delete. Therefore, our response to a
* HeapTupleBeingUpdated result is just to ignore it. We
* do *not* wait for the other guy to commit --- that
* would risk deadlock, and we don't want to block while
* holding the table lock anyway for performance reasons.
* We also ignore HeapTupleUpdated, which could occur if
* the other guy commits between our heap_getnext and
* heap_update calls.
*/
result = heap_update(lRel, &lTuple->t_self, rTuple,
&ctid,
GetCurrentCommandId(), SnapshotAny,
false /* no wait for commit */);
false /* no wait for commit */ );
switch (result)
{
case HeapTupleSelfUpdated:
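A sketch of how the result codes can be dispatched under the policy the comment lays out; illustrative only, and the set of cases treated as ignorable is an assumption, not a copy of the backend's switch:

/* Sketch (assumed handling): ignore concurrent-update outcomes */
switch (result)
{
	case HeapTupleMayBeUpdated:
		break;					/* update went through normally */
	case HeapTupleBeingUpdated:	/* uncommitted UNLISTEN in flight */
	case HeapTupleUpdated:		/* UNLISTEN committed under us */
		break;					/* deliberately ignored, as above */
	default:
		elog(ERROR, "unexpected heap_update status: %u", result);
		break;
}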
@@ -620,7 +623,7 @@ AtAbort_Notify(void)
void
AtSubStart_Notify(void)
{
MemoryContext old_cxt;
MemoryContext old_cxt;

/* Keep the list-of-lists in TopTransactionContext for simplicity */
old_cxt = MemoryContextSwitchTo(TopTransactionContext);
@@ -640,13 +643,14 @@ AtSubStart_Notify(void)
void
AtSubCommit_Notify(void)
{
List *parentPendingNotifies;
List *parentPendingNotifies;

parentPendingNotifies = (List *) linitial(upperPendingNotifies);
upperPendingNotifies = list_delete_first(upperPendingNotifies);

/*
* We could try to eliminate duplicates here, but it seems not worthwhile.
* We could try to eliminate duplicates here, but it seems not
* worthwhile.
*/
pendingNotifies = list_concat(parentPendingNotifies, pendingNotifies);
}
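The pop-and-concat above is one half of a simple stack discipline; a sketch of both halves, where the push side mirrors what AtSubStart_Notify has to do (illustrative, not verbatim):

/* At subxact start: stack the outer list, begin a fresh one (sketch) */
upperPendingNotifies = lcons(pendingNotifies, upperPendingNotifies);
pendingNotifies = NIL;

/* At subxact commit: pop and fold into the parent, as shown above */
parentPendingNotifies = (List *) linitial(upperPendingNotifies);
upperPendingNotifies = list_delete_first(upperPendingNotifies);
pendingNotifies = list_concat(parentPendingNotifies, pendingNotifies);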
@@ -836,7 +840,7 @@ EnableNotifyInterrupt(void)
bool
DisableNotifyInterrupt(void)
{
bool result = (notifyInterruptEnabled != 0);
bool result = (notifyInterruptEnabled != 0);

notifyInterruptEnabled = 0;

@@ -914,11 +918,12 @@ ProcessIncomingNotify(void)
relname, (int) sourcePID);

NotifyMyFrontEnd(relname, sourcePID);

/*
* Rewrite the tuple with 0 in notification column.
*
* simple_heap_update is safe here because no one else would
* have tried to UNLISTEN us, so there can be no uncommitted
* simple_heap_update is safe here because no one else would have
* tried to UNLISTEN us, so there can be no uncommitted
* changes.
*/
rTuple = heap_modifytuple(lTuple, lRel, value, nulls, repl);

@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.128 2004/08/29 04:12:29 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.129 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -286,8 +286,8 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
/*
* We grab exclusive access to the target rel and index for the
* duration of the transaction. (This is redundant for the single-
* transaction case, since cluster() already did it.) The index
* lock is taken inside check_index_is_clusterable.
* transaction case, since cluster() already did it.) The index lock
* is taken inside check_index_is_clusterable.
*/
OldHeap = heap_open(rvtc->tableOid, AccessExclusiveLock);

@@ -391,7 +391,7 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid)
if (isOtherTempNamespace(RelationGetNamespace(OldHeap)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot cluster temporary tables of other sessions")));
errmsg("cannot cluster temporary tables of other sessions")));

/* Drop relcache refcnt on OldIndex, but keep lock */
index_close(OldIndex);
@@ -438,7 +438,7 @@ mark_index_clustered(Relation rel, Oid indexOid)

foreach(index, RelationGetIndexList(rel))
{
Oid thisIndexOid = lfirst_oid(index);
Oid thisIndexOid = lfirst_oid(index);

indexTuple = SearchSysCacheCopy(INDEXRELID,
ObjectIdGetDatum(thisIndexOid),
@@ -540,8 +540,8 @@ rebuild_relation(Relation OldHeap, Oid indexOid)
/* performDeletion does CommandCounterIncrement at end */

/*
* Rebuild each index on the relation (but not the toast table,
* which is all-new at this point). We do not need
* Rebuild each index on the relation (but not the toast table, which
* is all-new at this point). We do not need
* CommandCounterIncrement() because reindex_relation does it.
*/
reindex_relation(tableOid, false);
@@ -569,7 +569,7 @@ make_new_heap(Oid OIDOldHeap, const char *NewName, Oid NewTableSpace)

OIDNewHeap = heap_create_with_catalog(NewName,
RelationGetNamespace(OldHeap),
NewTableSpace,
NewTableSpace,
tupdesc,
OldHeap->rd_rel->relkind,
OldHeap->rd_rel->relisshared,
@@ -745,8 +745,8 @@ swap_relation_files(Oid r1, Oid r2)
* their new owning relations. Otherwise the wrong one will get
* dropped ...
*
* NOTE: it is possible that only one table has a toast table; this
* can happen in CLUSTER if there were dropped columns in the old table,
* NOTE: it is possible that only one table has a toast table; this can
* happen in CLUSTER if there were dropped columns in the old table,
* and in ALTER TABLE when adding or changing type of columns.
*
* NOTE: at present, a TOAST table's only dependency is the one on its
@@ -802,15 +802,15 @@ swap_relation_files(Oid r1, Oid r2)
/*
* Blow away the old relcache entries now. We need this kluge because
* relcache.c keeps a link to the smgr relation for the physical file,
* and that will be out of date as soon as we do CommandCounterIncrement.
* Whichever of the rels is the second to be cleared during cache
* invalidation will have a dangling reference to an already-deleted smgr
* relation. Rather than trying to avoid this by ordering operations
* just so, it's easiest to not have the relcache entries there at all.
* (Fortunately, since one of the entries is local in our transaction,
* it's sufficient to clear out our own relcache this way; the problem
* cannot arise for other backends when they see our update on the
* non-local relation.)
* and that will be out of date as soon as we do
* CommandCounterIncrement. Whichever of the rels is the second to be
* cleared during cache invalidation will have a dangling reference to
* an already-deleted smgr relation. Rather than trying to avoid this
* by ordering operations just so, it's easiest to not have the
* relcache entries there at all. (Fortunately, since one of the
* entries is local in our transaction, it's sufficient to clear out
* our own relcache this way; the problem cannot arise for other
* backends when they see our update on the non-local relation.)
*/
RelationForgetRelation(r1);
RelationForgetRelation(r2);

@@ -7,7 +7,7 @@
* Copyright (c) 1996-2004, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.78 2004/08/29 04:12:30 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.79 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -123,10 +123,10 @@ CommentObject(CommentStmt *stmt)
CommentOpClass(stmt->objname, stmt->objargs, stmt->comment);
break;
case OBJECT_LARGEOBJECT:
CommentLargeObject(stmt->objname, stmt->comment);
CommentLargeObject(stmt->objname, stmt->comment);
break;
case OBJECT_CAST:
CommentCast(stmt->objname, stmt->objargs, stmt->comment);
CommentCast(stmt->objname, stmt->objargs, stmt->comment);
break;
default:
elog(ERROR, "unrecognized object type: %d",
@@ -401,8 +401,8 @@ CommentAttribute(List *qualname, char *comment)
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("column \"%s\" of relation \"%s\" does not exist",
attrname, RelationGetRelationName(relation))));
errmsg("column \"%s\" of relation \"%s\" does not exist",
attrname, RelationGetRelationName(relation))));

/* Create the comment using the relation's oid */

@@ -462,7 +462,8 @@ CommentDatabase(List *qualname, char *comment)
/* Only allow comments on the current database */
if (oid != MyDatabaseId)
{
ereport(WARNING, /* throw just a warning so pg_restore doesn't fail */
ereport(WARNING, /* throw just a warning so pg_restore
* doesn't fail */
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("database comments may only be applied to the current database")));
return;
@@ -586,7 +587,7 @@ CommentRule(List *qualname, char *comment)
ForwardScanDirection)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("there are multiple rules named \"%s\"", rulename),
errmsg("there are multiple rules named \"%s\"", rulename),
errhint("Specify a relation name as well as a rule name.")));

heap_endscan(scanDesc);
@@ -615,8 +616,8 @@ CommentRule(List *qualname, char *comment)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("rule \"%s\" for relation \"%s\" does not exist",
rulename, RelationGetRelationName(relation))));
errmsg("rule \"%s\" for relation \"%s\" does not exist",
rulename, RelationGetRelationName(relation))));
Assert(reloid == ((Form_pg_rewrite) GETSTRUCT(tuple))->ev_class);
ruleoid = HeapTupleGetOid(tuple);
ReleaseSysCache(tuple);
@@ -832,8 +833,8 @@ CommentTrigger(List *qualname, char *comment)
if (!HeapTupleIsValid(triggertuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("trigger \"%s\" for table \"%s\" does not exist",
trigname, RelationGetRelationName(relation))));
errmsg("trigger \"%s\" for table \"%s\" does not exist",
trigname, RelationGetRelationName(relation))));

oid = HeapTupleGetOid(triggertuple);

@@ -924,8 +925,8 @@ CommentConstraint(List *qualname, char *comment)
if (!OidIsValid(conOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("constraint \"%s\" for table \"%s\" does not exist",
conName, RelationGetRelationName(relation))));
errmsg("constraint \"%s\" for table \"%s\" does not exist",
conName, RelationGetRelationName(relation))));

/* Create the comment with the pg_constraint oid */
CreateComments(conOid, RelationGetRelid(pg_constraint), 0, comment);
@@ -1003,7 +1004,7 @@ CommentLanguage(List *qualname, char *comment)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to comment on procedural language")));
errmsg("must be superuser to comment on procedural language")));

/* pg_language doesn't have a hard-coded OID, so must look it up */
classoid = get_system_catalog_relid(LanguageRelationName);
@@ -1084,7 +1085,7 @@ CommentOpClass(List *qualname, List *arguments, char *comment)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator class \"%s\" does not exist for access method \"%s\"",
NameListToString(qualname), amname)));
NameListToString(qualname), amname)));

opcID = HeapTupleGetOid(tuple);

@@ -1116,7 +1117,7 @@ CommentLargeObject(List *qualname, char *comment)
{
Oid loid;
Oid classoid;
Node *node;
Node *node;

Assert(list_length(qualname) == 1);
node = (Node *) linitial(qualname);
@@ -1127,19 +1128,20 @@ CommentLargeObject(List *qualname, char *comment)
loid = intVal(node);
break;
case T_Float:

/*
* Values too large for int4 will be represented as Float
* constants by the lexer. Accept these if they are valid
* OID strings.
* constants by the lexer. Accept these if they are valid OID
* strings.
*/
loid = DatumGetObjectId(DirectFunctionCall1(oidin,
CStringGetDatum(strVal(node))));
CStringGetDatum(strVal(node))));
break;
default:
elog(ERROR, "unrecognized node type: %d",
(int) nodeTag(node));
/* keep compiler quiet */
loid = InvalidOid;
loid = InvalidOid;
}

/* check that the large object exists */
@@ -1152,7 +1154,7 @@ CommentLargeObject(List *qualname, char *comment)
classoid = get_system_catalog_relid(LargeObjectRelationName);

/* Call CreateComments() to create/drop the comments */
CreateComments(loid, classoid, 0, comment);
CreateComments(loid, classoid, 0, comment);
}

/*
@@ -1182,7 +1184,7 @@ CommentCast(List *qualname, List *arguments, char *comment)
Assert(list_length(arguments) == 1);
targettype = (TypeName *) linitial(arguments);
Assert(IsA(targettype, TypeName));


sourcetypeid = typenameTypeId(sourcetype);
if (!OidIsValid(sourcetypeid))
ereport(ERROR,
@@ -1210,7 +1212,7 @@ CommentCast(List *qualname, List *arguments, char *comment)

/* Get the OID of the cast */
castOid = HeapTupleGetOid(tuple);


/* Permission check */
if (!pg_type_ownercheck(sourcetypeid, GetUserId())
&& !pg_type_ownercheck(targettypeid, GetUserId()))
@@ -1226,5 +1228,5 @@ CommentCast(List *qualname, List *arguments, char *comment)
classoid = get_system_catalog_relid(CastRelationName);

/* Call CreateComments() to create/drop the comments */
CreateComments(castOid, classoid, 0, comment);
CreateComments(castOid, classoid, 0, comment);
}

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.14 2004/08/29 04:12:30 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.15 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -181,7 +181,7 @@ AlterConversionOwner(List *name, AclId newOwnerSysId)
Oid conversionOid;
HeapTuple tup;
Relation rel;
Form_pg_conversion convForm;
Form_pg_conversion convForm;

rel = heap_openr(ConversionRelationName, RowExclusiveLock);

@@ -200,7 +200,7 @@ AlterConversionOwner(List *name, AclId newOwnerSysId)

convForm = (Form_pg_conversion) GETSTRUCT(tup);

/*
/*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
@@ -212,7 +212,10 @@ AlterConversionOwner(List *name, AclId newOwnerSysId)
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to change owner")));

/* Modify the owner --- okay to scribble on tup because it's a copy */
/*
* Modify the owner --- okay to scribble on tup because it's a
* copy
*/
convForm->conowner = newOwnerSysId;

simple_heap_update(rel, &tup->t_self, tup);

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.229 2004/08/29 04:12:30 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.230 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -133,22 +133,22 @@ static void DoCopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
char *delim, char *null_print, bool csv_mode, char *quote,
char *escape, List *force_quote_atts, bool fe_copy);
static void CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
char *delim, char *null_print, bool csv_mode, char *quote, char *escape,
char *delim, char *null_print, bool csv_mode, char *quote, char *escape,
List *force_quote_atts);
static void CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
char *delim, char *null_print, bool csv_mode, char *quote, char *escape,
char *delim, char *null_print, bool csv_mode, char *quote, char *escape,
List *force_notnull_atts);
static bool CopyReadLine(void);
static char *CopyReadAttribute(const char *delim, const char *null_print,
CopyReadResult *result, bool *isnull);
CopyReadResult *result, bool *isnull);
static char *CopyReadAttributeCSV(const char *delim, const char *null_print,
char *quote, char *escape,
CopyReadResult *result, bool *isnull);
char *quote, char *escape,
CopyReadResult *result, bool *isnull);
static Datum CopyReadBinaryAttribute(int column_no, FmgrInfo *flinfo,
Oid typioparam, bool *isnull);
static void CopyAttributeOut(char *string, char *delim);
static void CopyAttributeOutCSV(char *string, char *delim, char *quote,
char *escape, bool force_quote);
char *escape, bool force_quote);
static List *CopyGetAttnums(Relation rel, List *attnamelist);
static void limit_printout_length(StringInfo buf);

@@ -413,7 +413,7 @@ CopyGetData(void *databuf, int datasize)
/* Try to receive another message */
int mtype;

readmessage:
readmessage:
mtype = pq_getbyte();
if (mtype == EOF)
ereport(ERROR,
@@ -439,11 +439,12 @@ CopyGetData(void *databuf, int datasize)
break;
case 'H': /* Flush */
case 'S': /* Sync */

/*
* Ignore Flush/Sync for the convenience of
* client libraries (such as libpq) that may
* send those without noticing that the command
* they just sent was COPY.
* send those without noticing that the
* command they just sent was COPY.
*/
goto readmessage;
default:
@@ -693,7 +694,7 @@ DoCopy(const CopyStmt *stmt)
bool fe_copy = false;
bool binary = false;
bool oids = false;
bool csv_mode = false;
bool csv_mode = false;
char *delim = NULL;
char *quote = NULL;
char *escape = NULL;
@@ -773,7 +774,7 @@ DoCopy(const CopyStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("conflicting or redundant options")));
force_quote = (List *)defel->arg;
force_quote = (List *) defel->arg;
}
else if (strcmp(defel->defname, "force_notnull") == 0)
{
@@ -781,7 +782,7 @@ DoCopy(const CopyStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("conflicting or redundant options")));
force_notnull = (List *)defel->arg;
force_notnull = (List *) defel->arg;
}
else
elog(ERROR, "option \"%s\" not recognized",
@@ -806,7 +807,7 @@ DoCopy(const CopyStmt *stmt)
/* Set defaults */
if (!delim)
delim = csv_mode ? "," : "\t";


if (!null_print)
null_print = csv_mode ? "" : "\\N";

@@ -817,7 +818,7 @@ DoCopy(const CopyStmt *stmt)
if (!escape)
escape = quote;
}


/*
* Only single-character delimiter strings are supported.
*/
@@ -862,7 +863,7 @@ DoCopy(const CopyStmt *stmt)
if (force_quote != NIL && is_from)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("COPY force quote only available using COPY TO")));
errmsg("COPY force quote only available using COPY TO")));

/*
* Check force_notnull
@@ -870,11 +871,11 @@ DoCopy(const CopyStmt *stmt)
if (!csv_mode && force_notnull != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("COPY force not null available only in CSV mode")));
errmsg("COPY force not null available only in CSV mode")));
if (force_notnull != NIL && !is_from)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("COPY force not null only available using COPY FROM")));
errmsg("COPY force not null only available using COPY FROM")));

/*
* Don't allow the delimiter to appear in the null string.
@@ -948,11 +949,11 @@ DoCopy(const CopyStmt *stmt)
if (!list_member_int(attnumlist, attnum))
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
errmsg("FORCE QUOTE column \"%s\" not referenced by COPY",
NameStr(attr[attnum - 1]->attname))));
errmsg("FORCE QUOTE column \"%s\" not referenced by COPY",
NameStr(attr[attnum - 1]->attname))));
}
}


/*
* Check that FORCE NOT NULL references valid COPY columns
*/
@@ -975,7 +976,7 @@ DoCopy(const CopyStmt *stmt)
NameStr(attr[attnum - 1]->attname))));
}
}


/*
* Set up variables to avoid per-attribute overhead.
*/
@@ -1152,9 +1153,9 @@ DoCopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
PG_CATCH();
{
/*
* Make sure we turn off old-style COPY OUT mode upon error.
* It is okay to do this in all cases, since it does nothing
* if the mode is not on.
* Make sure we turn off old-style COPY OUT mode upon error. It is
* okay to do this in all cases, since it does nothing if the mode
* is not on.
*/
pq_endcopyout(true);
PG_RE_THROW();
@@ -1202,10 +1203,10 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
{
int attnum = lfirst_int(cur);
Oid out_func_oid;


if (binary)
getTypeBinaryOutputInfo(attr[attnum - 1]->atttypid,
&out_func_oid, &typioparams[attnum - 1],
&out_func_oid, &typioparams[attnum - 1],
&isvarlena[attnum - 1]);
else
getTypeOutputInfo(attr[attnum - 1]->atttypid,
@@ -1266,6 +1267,7 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
while ((tuple = heap_getnext(scandesc, ForwardScanDirection)) != NULL)
{
bool need_delim = false;

CHECK_FOR_INTERRUPTS();

MemoryContextReset(mycontext);
@@ -1325,13 +1327,13 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
{
string = DatumGetCString(FunctionCall3(&out_functions[attnum - 1],
value,
ObjectIdGetDatum(typioparams[attnum - 1]),
ObjectIdGetDatum(typioparams[attnum - 1]),
Int32GetDatum(attr[attnum - 1]->atttypmod)));
if (csv_mode)
{
CopyAttributeOutCSV(string, delim, quote, escape,
(strcmp(string, null_print) == 0 ||
force_quote[attnum - 1]));
(strcmp(string, null_print) == 0 ||
force_quote[attnum - 1]));
}
else
CopyAttributeOut(string, delim);
@@ -1343,7 +1345,7 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,

outputbytes = DatumGetByteaP(FunctionCall2(&out_functions[attnum - 1],
value,
ObjectIdGetDatum(typioparams[attnum - 1])));
ObjectIdGetDatum(typioparams[attnum - 1])));
/* We assume the result will not have been toasted */
CopySendInt32(VARSIZE(outputbytes) - VARHDRSZ);
CopySendData(VARDATA(outputbytes),
@@ -1444,7 +1446,7 @@ limit_printout_length(StringInfo buf)
{
#define MAX_COPY_DATA_DISPLAY 100

int len;
int len;

/* Fast path if definitely okay */
if (buf->len <= MAX_COPY_DATA_DISPLAY)
@@ -1551,7 +1553,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
/* Fetch the input function and typioparam info */
if (binary)
getTypeBinaryInputInfo(attr[attnum - 1]->atttypid,
&in_func_oid, &typioparams[attnum - 1]);
&in_func_oid, &typioparams[attnum - 1]);
else
getTypeInputInfo(attr[attnum - 1]->atttypid,
&in_func_oid, &typioparams[attnum - 1]);
@@ -1561,7 +1563,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
force_notnull[attnum - 1] = true;
else
force_notnull[attnum - 1] = false;


/* Get default info if needed */
if (!list_member_int(attnumlist, attnum))
{
@@ -1603,7 +1605,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
COERCE_IMPLICIT_CAST, false);

constraintexprs[attnum - 1] = ExecPrepareExpr((Expr *) node,
estate);
estate);
hasConstraints = true;
}
}
@@ -1718,10 +1720,10 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
done = CopyReadLine();

/*
* EOF at start of line means we're done. If we see EOF
* after some characters, we act as though it was newline
* followed by EOF, ie, process the line and then exit loop
* on next iteration.
* EOF at start of line means we're done. If we see EOF after
* some characters, we act as though it was newline followed
* by EOF, ie, process the line and then exit loop on next
* iteration.
*/
if (done && line_buf.len == 0)
break;
@@ -1770,29 +1772,29 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
if (csv_mode)
{
string = CopyReadAttributeCSV(delim, null_print, quote,
escape, &result, &isnull);
escape, &result, &isnull);
if (result == UNTERMINATED_FIELD)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("unterminated CSV quoted field")));
errmsg("unterminated CSV quoted field")));
}
else
string = CopyReadAttribute(delim, null_print,
string = CopyReadAttribute(delim, null_print,
&result, &isnull);

if (csv_mode && isnull && force_notnull[m])
{
string = null_print; /* set to NULL string */
string = null_print; /* set to NULL string */
isnull = false;
}

/* we read an SQL NULL, no need to do anything */
/* we read an SQL NULL, no need to do anything */
if (!isnull)
{
copy_attname = NameStr(attr[m]->attname);
values[m] = FunctionCall3(&in_functions[m],
CStringGetDatum(string),
ObjectIdGetDatum(typioparams[m]),
ObjectIdGetDatum(typioparams[m]),
Int32GetDatum(attr[m]->atttypmod));
nulls[m] = ' ';
copy_attname = NULL;
@@ -1809,7 +1811,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
if (result == NORMAL_ATTR && line_buf.len != 0)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("extra data after last expected column")));
errmsg("extra data after last expected column")));
}
else
{
@@ -1835,8 +1837,8 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
copy_attname = "oid";
loaded_oid =
DatumGetObjectId(CopyReadBinaryAttribute(0,
&oid_in_function,
oid_typioparam,
&oid_in_function,
oid_typioparam,
&isnull));
if (isnull || loaded_oid == InvalidOid)
ereport(ERROR,
@@ -2022,14 +2024,14 @@ CopyReadLine(void)
result = false;

/*
* In this loop we only care for detecting newlines (\r and/or \n)
* and the end-of-copy marker (\.). For backwards compatibility
* we allow backslashes to escape newline characters. Backslashes
* other than the end marker get put into the line_buf, since
* CopyReadAttribute does its own escape processing. These four
* characters, and only these four, are assumed the same in frontend
* and backend encodings. We do not assume that second and later bytes
* of a frontend multibyte character couldn't look like ASCII characters.
* In this loop we only care for detecting newlines (\r and/or \n) and
* the end-of-copy marker (\.). For backwards compatibility we allow
* backslashes to escape newline characters. Backslashes other than
* the end marker get put into the line_buf, since CopyReadAttribute
* does its own escape processing. These four characters, and only
* these four, are assumed the same in frontend and backend encodings.
* We do not assume that second and later bytes of a frontend
* multibyte character couldn't look like ASCII characters.
*/
for (;;)
{
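Reduced to its essentials, the per-character test the comment describes might look as follows; a sketch in which CopyGetChar() stands in for whatever fetches the next input byte, and \r\n pairing, protocol handling, and multibyte trailing bytes are all elided:

/* Sketch only: newline and end-of-copy detection for one byte */
c = CopyGetChar();				/* hypothetical next-byte fetch */
if (c == '\\')
{
	int			c2 = CopyGetChar();

	if (c2 == '.')
	{
		result = true;			/* \. is the end-of-copy marker */
		break;
	}
	appendStringInfoCharMacro(&line_buf, '\\');
	c = c2;						/* backslash may escape a newline */
}
if (c == '\n' || c == '\r')
	break;						/* end of line reached */
appendStringInfoCharMacro(&line_buf, c);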
@@ -2120,9 +2122,9 @@ CopyReadLine(void)
errmsg("end-of-copy marker does not match previous newline style")));

/*
* In protocol version 3, we should ignore anything
* after \. up to the protocol end of copy data. (XXX
* maybe better not to treat \. as special?)
* In protocol version 3, we should ignore anything after
* \. up to the protocol end of copy data. (XXX maybe
* better not to treat \. as special?)
*/
if (copy_dest == COPY_NEW_FE)
{
@@ -2140,10 +2142,10 @@ CopyReadLine(void)

/*
* When client encoding != server, must be careful to read the
* extra bytes of a multibyte character exactly, since the encoding
* might not ensure they don't look like ASCII. When the encodings
* are the same, we need not do this, since no server encoding we
* use has ASCII-like following bytes.
* extra bytes of a multibyte character exactly, since the
* encoding might not ensure they don't look like ASCII. When the
* encodings are the same, we need not do this, since no server
* encoding we use has ASCII-like following bytes.
*/
if (change_encoding)
{
@@ -2162,7 +2164,7 @@ CopyReadLine(void)
if (result)
break; /* out of outer loop */
}
} /* end of outer loop */
} /* end of outer loop */

/*
* Done reading the line. Convert it to server encoding.
@@ -2170,8 +2172,9 @@ CopyReadLine(void)
* Note: set line_buf_converted to true *before* attempting conversion;
* this prevents infinite recursion during error reporting should
* pg_client_to_server() issue an error, due to copy_in_error_callback
* again attempting the same conversion. We'll end up issuing the message
* without conversion, which is bad but better than nothing ...
* again attempting the same conversion. We'll end up issuing the
* message without conversion, which is bad but better than nothing
* ...
*/
line_buf_converted = true;

@@ -2295,9 +2298,11 @@ CopyReadAttribute(const char *delim, const char *null_print,
case 'v':
c = '\v';
break;
/*
* in all other cases, take the char after '\' literally
*/

/*
* in all other cases, take the char after '\'
* literally
*/
}
}
appendStringInfoCharMacro(&attribute_buf, c);
@@ -2316,7 +2321,7 @@ CopyReadAttribute(const char *delim, const char *null_print,


/*
* Read the value of a single attribute in CSV mode,
* Read the value of a single attribute in CSV mode,
* performing de-escaping as needed. Escaping does not follow the normal
* PostgreSQL text mode, but instead "standard" (i.e. common) CSV usage.
*
@@ -2329,7 +2334,7 @@ CopyReadAttribute(const char *delim, const char *null_print,
* *result is set to indicate what terminated the read:
* NORMAL_ATTR: column delimiter
* END_OF_LINE: end of line
* UNTERMINATED_FIELD no quote detected at end of a quoted field
* UNTERMINATED_FIELD no quote detected at end of a quoted field
*
* In any case, the string read up to the terminator (or end of file)
* is returned.
@@ -2345,15 +2350,15 @@ static char *
CopyReadAttributeCSV(const char *delim, const char *null_print, char *quote,
char *escape, CopyReadResult *result, bool *isnull)
{
char delimc = delim[0];
char quotec = quote[0];
char escapec = escape[0];
char delimc = delim[0];
char quotec = quote[0];
char escapec = escape[0];
char c;
int start_cursor = line_buf.cursor;
int end_cursor = start_cursor;
int input_len;
bool in_quote = false;
bool saw_quote = false;
bool in_quote = false;
bool saw_quote = false;

/* reset attribute_buf to empty */
attribute_buf.len = 0;
@@ -2367,18 +2372,18 @@ CopyReadAttributeCSV(const char *delim, const char *null_print, char *quote,
/* handle multiline quoted fields */
if (in_quote && line_buf.cursor >= line_buf.len)
{
bool done;
bool done;

switch(eol_type)
switch (eol_type)
{
case EOL_NL:
appendStringInfoString(&attribute_buf,"\n");
appendStringInfoString(&attribute_buf, "\n");
break;
case EOL_CR:
appendStringInfoString(&attribute_buf,"\r");
appendStringInfoString(&attribute_buf, "\r");
break;
case EOL_CRNL:
appendStringInfoString(&attribute_buf,"\r\n");
appendStringInfoString(&attribute_buf, "\r\n");
break;
case EOL_UNKNOWN:
/* shouldn't happen - just keep going */
@@ -2396,16 +2401,18 @@ CopyReadAttributeCSV(const char *delim, const char *null_print, char *quote,
if (line_buf.cursor >= line_buf.len)
break;
c = line_buf.data[line_buf.cursor++];
/*
* unquoted field delimiter

/*
* unquoted field delimiter
*/
if (!in_quote && c == delimc)
{
*result = NORMAL_ATTR;
break;
}
/*
* start of quoted field (or part of field)

/*
* start of quoted field (or part of field)
*/
if (!in_quote && c == quotec)
{
@@ -2413,18 +2420,20 @@ CopyReadAttributeCSV(const char *delim, const char *null_print, char *quote,
in_quote = true;
continue;
}
/*

/*
* escape within a quoted field
*/
if (in_quote && c == escapec)
{
/*
* peek at the next char if available, and escape it if it
* is an escape char or a quote char
/*
* peek at the next char if available, and escape it if it is
* an escape char or a quote char
*/
if (line_buf.cursor <= line_buf.len)
{
char nextc = line_buf.data[line_buf.cursor];
char nextc = line_buf.data[line_buf.cursor];

if (nextc == escapec || nextc == quotec)
{
appendStringInfoCharMacro(&attribute_buf, nextc);
@@ -2433,10 +2442,11 @@ CopyReadAttributeCSV(const char *delim, const char *null_print, char *quote,
}
}
}

/*
* end of quoted field.
* Must do this test after testing for escape in case quote char
* and escape char are the same (which is the common case).
* end of quoted field. Must do this test after testing for escape
* in case quote char and escape char are the same (which is the
* common case).
*/
if (in_quote && c == quotec)
{
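The ordering constraint called out above (escape before closing quote) is the crux of the scanner; a condensed sketch with hypothetical next_input_char()/peek_input_char()/skip_input_char() helpers standing in for the line_buf cursor arithmetic:

/* Condensed CSV field scanner (sketch, not the backend loop) */
for (;;)
{
	char		c = next_input_char();

	if (!in_quote && c == delimc)
		break;					/* unquoted delimiter ends the field */
	if (!in_quote && c == quotec)
	{
		in_quote = true;		/* opening quote */
		continue;
	}
	if (in_quote && c == escapec)
	{
		char		n = peek_input_char();

		if (n == quotec || n == escapec)
		{
			appendStringInfoCharMacro(&attribute_buf, n);
			skip_input_char();	/* consume the escaped character */
			continue;
		}
	}
	/* must test after the escape case: quotec == escapec is common */
	if (in_quote && c == quotec)
	{
		in_quote = false;		/* closing quote */
		continue;
	}
	appendStringInfoCharMacro(&attribute_buf, c);
}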
@@ -2586,7 +2596,7 @@ CopyAttributeOut(char *server_string, char *delim)
}

/*
* Send CSV representation of one attribute, with conversion and
* Send CSV representation of one attribute, with conversion and
* CSV type escaping
*/
static void
@@ -2596,9 +2606,9 @@ CopyAttributeOutCSV(char *server_string, char *delim, char *quote,
char *string;
char c;
char delimc = delim[0];
char quotec = quote[0];
char escapec = escape[0];
char *test_string;
char quotec = quote[0];
char escapec = escape[0];
char *test_string;
bool same_encoding;
int mblen;
int i;
@@ -2610,13 +2620,14 @@ CopyAttributeOutCSV(char *server_string, char *delim, char *quote,
else
string = server_string;

/* have to run through the string twice,
* first time to see if it needs quoting, second to actually send it
/*
* have to run through the string twice, first time to see if it needs
* quoting, second to actually send it
*/

for(test_string = string;
!use_quote && (c = *test_string) != '\0';
test_string += mblen)
for (test_string = string;
!use_quote && (c = *test_string) != '\0';
test_string += mblen)
{
if (c == delimc || c == quotec || c == '\n' || c == '\r')
use_quote = true;
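The two passes described above can be sketched as below; send_char() is a hypothetical stand-in for copy.c's CopySend-style output calls, and the multibyte-length stepping of the real loop is omitted:

/* Two-pass CSV output (sketch): decide on quoting, then emit */
char	   *p;
char		c;
bool		use_quote = force_quote;

for (p = string; !use_quote && (c = *p) != '\0'; p++)
{
	if (c == delimc || c == quotec || c == '\n' || c == '\r')
		use_quote = true;		/* pass 1: scan only, emit nothing */
}

if (use_quote)
	send_char(quotec);
for (p = string; (c = *p) != '\0'; p++)
{
	if (use_quote && (c == quotec || c == escapec))
		send_char(escapec);		/* pass 2: escape embedded specials */
	send_char(c);
}
if (use_quote)
	send_char(quotec);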
@@ -2695,8 +2706,8 @@ CopyGetAttnums(Relation rel, List *attnamelist)
|
||||
if (list_member_int(attnums, attnum))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_DUPLICATE_COLUMN),
|
||||
errmsg("column \"%s\" specified more than once",
|
||||
name)));
|
||||
errmsg("column \"%s\" specified more than once",
|
||||
name)));
|
||||
attnums = lappend_int(attnums, attnum);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.140 2004/08/29 04:12:30 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.141 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -78,7 +78,7 @@ createdb(const CreatedbStmt *stmt)
Oid dboid;
AclId datdba;
ListCell *option;
DefElem *dtablespacename = NULL;
DefElem *dtablespacename = NULL;
DefElem *downer = NULL;
DefElem *dtemplate = NULL;
DefElem *dencoding = NULL;
@@ -86,6 +86,7 @@ createdb(const CreatedbStmt *stmt)
char *dbowner = NULL;
char *dbtemplate = NULL;
int encoding = -1;

#ifndef WIN32
char buf[2 * MAXPGPATH + 100];
#endif
@@ -224,7 +225,7 @@ createdb(const CreatedbStmt *stmt)
&src_vacuumxid, &src_frozenxid, &src_deftablespace))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_DATABASE),
errmsg("template database \"%s\" does not exist", dbtemplate)));
errmsg("template database \"%s\" does not exist", dbtemplate)));

/*
* Permission check: to copy a DB that's not marked datistemplate, you
@@ -265,7 +266,7 @@ createdb(const CreatedbStmt *stmt)
if (dtablespacename && dtablespacename->arg)
{
char *tablespacename;
AclResult aclresult;
AclResult aclresult;

tablespacename = strVal(dtablespacename->arg);
dst_deftablespace = get_tablespace_oid(tablespacename);
@@ -275,11 +276,11 @@ createdb(const CreatedbStmt *stmt)
errmsg("tablespace \"%s\" does not exist",
tablespacename)));
/* check permissions */
aclresult = pg_tablespace_aclcheck(dst_deftablespace, GetUserId(),
aclresult = pg_tablespace_aclcheck(dst_deftablespace, GetUserId(),
ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
tablespacename);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
tablespacename);
}
else
{
@@ -308,22 +309,22 @@ createdb(const CreatedbStmt *stmt)
closeAllVfds();

/*
* Iterate through all tablespaces of the template database, and
* copy each one to the new database.
* Iterate through all tablespaces of the template database, and copy
* each one to the new database.
*
* If we are trying to change the default tablespace of the template,
* we require that the template not have any files in the new default
* tablespace. This avoids the need to merge two subdirectories.
* This could probably be improved later.
* If we are trying to change the default tablespace of the template, we
* require that the template not have any files in the new default
* tablespace. This avoids the need to merge two subdirectories. This
* could probably be improved later.
*/
rel = heap_openr(TableSpaceRelationName, AccessShareLock);
scan = heap_beginscan(rel, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
Oid srctablespace = HeapTupleGetOid(tuple);
Oid dsttablespace;
char *srcpath;
char *dstpath;
Oid srctablespace = HeapTupleGetOid(tuple);
Oid dsttablespace;
char *srcpath;
char *dstpath;
struct stat st;

/* No need to copy global tablespace */
@@ -351,10 +352,11 @@ createdb(const CreatedbStmt *stmt)
remove_dbtablespaces(dboid);
ereport(ERROR,
(errmsg("could not initialize database directory"),
errdetail("Directory \"%s\" already exists.", dstpath)));
errdetail("Directory \"%s\" already exists.", dstpath)));
}

#ifndef WIN32

/*
* Copy this subdirectory to the new location
*
@@ -374,7 +376,7 @@ createdb(const CreatedbStmt *stmt)
errdetail("Failing system command was: %s", buf),
errhint("Look in the postmaster's stderr log for more information.")));
}
#else /* WIN32 */
#else /* WIN32 */
if (copydir(srcpath, dstpath) != 0)
{
/* copydir should already have given details of its troubles */
@@ -382,7 +384,7 @@ createdb(const CreatedbStmt *stmt)
ereport(ERROR,
(errmsg("could not initialize database directory")));
}
#endif /* WIN32 */
#endif /* WIN32 */
}
heap_endscan(scan);
heap_close(rel, AccessShareLock);
@@ -772,7 +774,7 @@ AlterDatabaseOwner(const char *dbname, AclId newOwnerSysId)
Relation rel;
ScanKeyData scankey;
SysScanDesc scan;
Form_pg_database datForm;
Form_pg_database datForm;

rel = heap_openr(DatabaseRelationName, RowExclusiveLock);
ScanKeyInit(&scankey,
@@ -789,16 +791,17 @@ AlterDatabaseOwner(const char *dbname, AclId newOwnerSysId)

datForm = (Form_pg_database) GETSTRUCT(tuple);

/*
/*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is to be consistent with other objects.
* command to have succeeded. This is to be consistent with other
* objects.
*/
if (datForm->datdba != newOwnerSysId)
{
Datum repl_val[Natts_pg_database];
char repl_null[Natts_pg_database];
char repl_repl[Natts_pg_database];
Acl *newAcl;
Acl *newAcl;
Datum aclDatum;
bool isNull;
HeapTuple newtuple;
@@ -821,9 +824,9 @@ AlterDatabaseOwner(const char *dbname, AclId newOwnerSysId)
* necessary when the ACL is non-null.
*/
aclDatum = heap_getattr(tuple,
Anum_pg_database_datacl,
RelationGetDescr(rel),
&isNull);
Anum_pg_database_datacl,
RelationGetDescr(rel),
&isNull);
if (!isNull)
{
newAcl = aclnewowner(DatumGetAclP(aclDatum),
@@ -941,16 +944,16 @@ have_createdb_privilege(void)
static void
remove_dbtablespaces(Oid db_id)
{
Relation rel;
Relation rel;
HeapScanDesc scan;
HeapTuple tuple;
HeapTuple tuple;

rel = heap_openr(TableSpaceRelationName, AccessShareLock);
scan = heap_beginscan(rel, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
Oid dsttablespace = HeapTupleGetOid(tuple);
char *dstpath;
Oid dsttablespace = HeapTupleGetOid(tuple);
char *dstpath;
struct stat st;

/* Don't mess with the global tablespace */
@@ -969,9 +972,9 @@ remove_dbtablespaces(Oid db_id)
if (!rmtree(dstpath, true))
{
ereport(WARNING,
(errmsg("could not remove database directory \"%s\"",
dstpath),
errhint("Look in the postmaster's stderr log for more information.")));
(errmsg("could not remove database directory \"%s\"",
dstpath),
errhint("Look in the postmaster's stderr log for more information.")));
}

pfree(dstpath);

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.90 2004/08/29 04:12:30 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.91 2004/08/29 05:06:41 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -126,8 +126,8 @@ bool
defGetBoolean(DefElem *def)
{
/*
* Presently, boolean flags must simply be present or absent.
* Later we could allow 'flag = t', 'flag = f', etc.
* Presently, boolean flags must simply be present or absent. Later we
* could allow 'flag = t', 'flag = f', etc.
*/
if (def->arg == NULL)
return true;
@@ -265,7 +265,7 @@ defGetTypeLength(DefElem *def)
case T_TypeName:
/* cope if grammar chooses to believe "variable" is a typename */
if (pg_strcasecmp(TypeNameToString((TypeName *) def->arg),
"variable") == 0)
"variable") == 0)
return -1; /* variable length */
break;
case T_List:

@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.123 2004/08/29 04:12:30 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.124 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -317,7 +317,7 @@ explain_outNode(StringInfo str,
Plan *outer_plan,
int indent, ExplainState *es)
{
ListCell *l;
ListCell *l;
char *pname;
int i;


@@ -3,14 +3,14 @@
* functioncmds.c
*
* Routines for CREATE and DROP FUNCTION commands and CREATE and DROP
* CAST commands.
* CAST commands.
*
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.51 2004/08/29 04:12:30 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.52 2004/08/29 05:06:41 momjian Exp $
*
* DESCRIPTION
* These routines take the parse tree and pick out the
@@ -449,14 +449,14 @@ CreateFunction(CreateFunctionStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("language \"%s\" does not exist", languageName),
(strcmp(languageName, "plperl") == 0 ||
strcmp(languageName, "plperlu") == 0 ||
strcmp(languageName, "plpgsql") == 0 ||
strcmp(languageName, "plpythonu") == 0 ||
strcmp(languageName, "pltcl") == 0 ||
strcmp(languageName, "pltclu") == 0) ?
(strcmp(languageName, "plperl") == 0 ||
strcmp(languageName, "plperlu") == 0 ||
strcmp(languageName, "plpgsql") == 0 ||
strcmp(languageName, "plpythonu") == 0 ||
strcmp(languageName, "pltcl") == 0 ||
strcmp(languageName, "pltclu") == 0) ?
errhint("You need to use \"createlang\" to load the language into the database.") : 0));


languageOid = HeapTupleGetOid(languageTuple);
languageStruct = (Form_pg_language) GETSTRUCT(languageTuple);

@@ -490,7 +490,7 @@ CreateFunction(CreateFunctionStmt *stmt)
&prorettype, &returnsSet);

parameterCount = examine_parameter_list(stmt->parameters, languageOid,
parameterTypes, parameterNames);
parameterTypes, parameterNames);

compute_attributes_with_style(stmt->withClause, &isStrict, &volatility);

@@ -739,8 +739,8 @@ AlterFunctionOwner(List *name, List *argtypes, AclId newOwnerSysId)
procOid = LookupFuncNameTypeNames(name, argtypes, false);

tup = SearchSysCache(PROCOID,
ObjectIdGetDatum(procOid),
0, 0, 0);
ObjectIdGetDatum(procOid),
0, 0, 0);
if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "cache lookup failed for function %u", procOid);
procForm = (Form_pg_proc) GETSTRUCT(tup);
@@ -750,9 +750,9 @@ AlterFunctionOwner(List *name, List *argtypes, AclId newOwnerSysId)
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is an aggregate function",
NameListToString(name)),
errhint("Use ALTER AGGREGATE to change owner of aggregate functions.")));
errhint("Use ALTER AGGREGATE to change owner of aggregate functions.")));

/*
/*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
@@ -761,7 +761,7 @@ AlterFunctionOwner(List *name, List *argtypes, AclId newOwnerSysId)
Datum repl_val[Natts_pg_proc];
char repl_null[Natts_pg_proc];
char repl_repl[Natts_pg_proc];
Acl *newAcl;
Acl *newAcl;
Datum aclDatum;
bool isNull;
HeapTuple newtuple;
@@ -968,7 +968,7 @@ CreateCast(CreateCastStmt *stmt)
if (nargs < 1 || nargs > 3)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("cast function must take one to three arguments")));
errmsg("cast function must take one to three arguments")));
if (procstruct->proargtypes[0] != sourcetypeid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.124 2004/08/29 04:12:30 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.125 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -47,10 +47,10 @@
/* non-export function prototypes */
static void CheckPredicate(Expr *predicate);
static void ComputeIndexAttrs(IndexInfo *indexInfo, Oid *classOidP,
List *attList,
Oid relId,
char *accessMethodName, Oid accessMethodId,
bool isconstraint);
List *attList,
Oid relId,
char *accessMethodName, Oid accessMethodId,
bool isconstraint);
static Oid GetIndexOpClass(List *opclass, Oid attrType,
char *accessMethodName, Oid accessMethodId);
static Oid GetDefaultOpClass(Oid attrType, Oid accessMethodId);
@@ -143,7 +143,8 @@ DefineIndex(RangeVar *heapRelation,
* Verify we (still) have CREATE rights in the rel's namespace.
* (Presumably we did when the rel was created, but maybe not
* anymore.) Skip check if caller doesn't want it. Also skip check
* if bootstrapping, since permissions machinery may not be working yet.
* if bootstrapping, since permissions machinery may not be working
* yet.
*/
if (check_rights && !IsBootstrapProcessingMode())
{
@@ -159,7 +160,7 @@ DefineIndex(RangeVar *heapRelation,
/* Determine tablespace to use */
if (tableSpaceName)
{
AclResult aclresult;
AclResult aclresult;

tablespaceId = get_tablespace_oid(tableSpaceName);
if (!OidIsValid(tablespaceId))
@@ -173,7 +174,9 @@ DefineIndex(RangeVar *heapRelation,
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
tableSpaceName);
} else {
}
else
{
/* Use the parent rel's tablespace */
tablespaceId = get_rel_tablespace(relationId);
/* Note there is no additional permission check in this path */
@@ -256,9 +259,9 @@ DefineIndex(RangeVar *heapRelation,

/*
* If ALTER TABLE, check that there isn't already a PRIMARY KEY.
* In CREATE TABLE, we have faith that the parser rejected multiple
* pkey clauses; and CREATE INDEX doesn't have a way to say
* PRIMARY KEY, so it's no problem either.
* In CREATE TABLE, we have faith that the parser rejected
* multiple pkey clauses; and CREATE INDEX doesn't have a way to
* say PRIMARY KEY, so it's no problem either.
*/
if (is_alter_table &&
relationHasPrimaryKey(rel))
@@ -270,8 +273,8 @@ DefineIndex(RangeVar *heapRelation,
}

/*
* Check that all of the attributes in a primary key are marked as not
* null, otherwise attempt to ALTER TABLE .. SET NOT NULL
* Check that all of the attributes in a primary key are marked as
* not null, otherwise attempt to ALTER TABLE .. SET NOT NULL
*/
cmds = NIL;
foreach(keys, attributeList)
@@ -294,7 +297,7 @@ DefineIndex(RangeVar *heapRelation,
if (!((Form_pg_attribute) GETSTRUCT(atttuple))->attnotnull)
{
/* Add a subcommand to make this one NOT NULL */
AlterTableCmd *cmd = makeNode(AlterTableCmd);
AlterTableCmd *cmd = makeNode(AlterTableCmd);

cmd->subtype = AT_SetNotNull;
cmd->name = key->name;
@@ -318,15 +321,15 @@ DefineIndex(RangeVar *heapRelation,
}

/*
* XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade
* to child tables? Currently, since the PRIMARY KEY
* itself doesn't cascade, we don't cascade the
* notnull constraint(s) either; but this is pretty debatable.
* XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade to child
* tables? Currently, since the PRIMARY KEY itself doesn't
* cascade, we don't cascade the notnull constraint(s) either; but
* this is pretty debatable.
*
* XXX: possible future improvement: when being called from
* ALTER TABLE, it would be more efficient to merge this with
* the outer ALTER TABLE, so as to avoid two scans. But that
* seems to complicate DefineIndex's API unduly.
* XXX: possible future improvement: when being called from ALTER
* TABLE, it would be more efficient to merge this with the outer
* ALTER TABLE, so as to avoid two scans. But that seems to
* complicate DefineIndex's API unduly.
*/
if (cmds)
AlterTableInternal(relationId, cmds, false);
@@ -352,15 +355,15 @@ DefineIndex(RangeVar *heapRelation,
heap_close(rel, NoLock);

/*
* Report index creation if appropriate (delay this till after most
* of the error checks)
* Report index creation if appropriate (delay this till after most of
* the error checks)
*/
if (isconstraint && !quiet)
ereport(NOTICE,
(errmsg("%s %s will create implicit index \"%s\" for table \"%s\"",
is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /",
is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /",
primary ? "PRIMARY KEY" : "UNIQUE",
indexRelationName, RelationGetRelationName(rel))));
indexRelationName, RelationGetRelationName(rel))));

index_create(relationId, indexRelationName,
indexInfo, accessMethodId, tablespaceId, classObjectId,
@@ -450,8 +453,8 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
if (isconstraint)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("column \"%s\" named in key does not exist",
attribute->name)));
errmsg("column \"%s\" named in key does not exist",
attribute->name)));
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
@@ -488,11 +491,11 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
if (contain_subplans(attribute->expr))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot use subquery in index expression")));
errmsg("cannot use subquery in index expression")));
if (contain_agg_clause(attribute->expr))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
errmsg("cannot use aggregate function in index expression")));
errmsg("cannot use aggregate function in index expression")));

/*
* A expression using mutable functions is probably wrong,
@@ -647,7 +650,7 @@ GetDefaultOpClass(Oid attrType, Oid accessMethodId)
* than one exact match, then someone put bogus entries in pg_opclass.
*
* The initial search is done by namespace.c so that we only consider
* opclasses visible in the current namespace search path. (See also
* opclasses visible in the current namespace search path. (See also
* typcache.c, which applies the same logic, but over all opclasses.)
*/
for (opclass = OpclassGetCandidates(accessMethodId);
@@ -962,16 +965,16 @@ ReindexTable(RangeVar *relation, bool force /* currently unused */ )
* separate transaction, so we can release the lock on it right away.
*/
void
ReindexDatabase(const char *dbname, bool force /* currently unused */,
ReindexDatabase(const char *dbname, bool force /* currently unused */ ,
bool all)
{
Relation relationRelation;
Relation relationRelation;
HeapScanDesc scan;
HeapTuple tuple;
HeapTuple tuple;
MemoryContext private_context;
MemoryContext old;
List *relids = NIL;
ListCell *l;
List *relids = NIL;
ListCell *l;

AssertArg(dbname);

@@ -1006,7 +1009,7 @@ ReindexDatabase(const char *dbname, bool force /* currently unused */,
/*
* We always want to reindex pg_class first. This ensures that if
* there is any corruption in pg_class' indexes, they will be fixed
* before we process any other tables. This is critical because
* before we process any other tables. This is critical because
* reindexing itself will try to update pg_class.
*/
old = MemoryContextSwitchTo(private_context);
@@ -1054,7 +1057,7 @@ ReindexDatabase(const char *dbname, bool force /* currently unused */,
CommitTransactionCommand();
foreach(l, relids)
{
Oid relid = lfirst_oid(l);
Oid relid = lfirst_oid(l);

StartTransactionCommand();
SetQuerySnapshot(); /* might be needed for functions in

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.27 2004/08/29 04:12:30 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.28 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -300,8 +300,8 @@ DefineOpClass(CreateOpClassStmt *stmt)
errmsg("could not make operator class \"%s\" be default for type %s",
opcname,
TypeNameToString(stmt->datatype)),
errdetail("Operator class \"%s\" already is the default.",
NameStr(opclass->opcname))));
errdetail("Operator class \"%s\" already is the default.",
NameStr(opclass->opcname))));
}

systable_endscan(scan);
@@ -419,6 +419,7 @@ assignOperSubtype(Oid amoid, Oid typeoid, Oid operOid)
if (optup == NULL)
elog(ERROR, "cache lookup failed for operator %u", operOid);
opform = (Form_pg_operator) GETSTRUCT(optup);

/*
* btree operators must be binary ops returning boolean, and the
* left-side input type must match the operator class' input type.
@@ -434,10 +435,11 @@ assignOperSubtype(Oid amoid, Oid typeoid, Oid operOid)
if (opform->oprleft != typeoid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("btree operators must have index type as left input")));
errmsg("btree operators must have index type as left input")));

/*
* The subtype is "default" (0) if oprright matches the operator class,
* otherwise it is oprright.
* The subtype is "default" (0) if oprright matches the operator
* class, otherwise it is oprright.
*/
if (opform->oprright == typeoid)
subtype = InvalidOid;
@@ -471,6 +473,7 @@ assignProcSubtype(Oid amoid, Oid typeoid, Oid procOid)
if (proctup == NULL)
elog(ERROR, "cache lookup failed for function %u", procOid);
procform = (Form_pg_proc) GETSTRUCT(proctup);

/*
* btree support procs must be 2-arg procs returning int4, and the
* first input type must match the operator class' input type.
@@ -486,10 +489,11 @@ assignProcSubtype(Oid amoid, Oid typeoid, Oid procOid)
if (procform->proargtypes[0] != typeoid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("btree procedures must have index type as first input")));
errmsg("btree procedures must have index type as first input")));

/*
* The subtype is "default" (0) if second input type matches the operator
* class, otherwise it is the second input type.
* The subtype is "default" (0) if second input type matches the
* operator class, otherwise it is the second input type.
*/
if (procform->proargtypes[1] == typeoid)
subtype = InvalidOid;
@@ -518,13 +522,13 @@ addClassMember(List **list, OpClassMember *member, bool isProc)
if (isProc)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("procedure number %d appears more than once",
member->number)));
errmsg("procedure number %d appears more than once",
member->number)));
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("operator number %d appears more than once",
member->number)));
errmsg("operator number %d appears more than once",
member->number)));
}
}
*list = lappend(*list, member);
@@ -885,7 +889,7 @@ AlterOpClassOwner(List *name, const char *access_method, AclId newOwnerSysId)
char *opcname;
HeapTuple tup;
Relation rel;
Form_pg_opclass opcForm;
Form_pg_opclass opcForm;

amOid = GetSysCacheOid(AMNAME,
CStringGetDatum(access_method),
@@ -937,7 +941,7 @@ AlterOpClassOwner(List *name, const char *access_method, AclId newOwnerSysId)
}
opcForm = (Form_pg_opclass) GETSTRUCT(tup);

/*
/*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
@@ -949,7 +953,10 @@ AlterOpClassOwner(List *name, const char *access_method, AclId newOwnerSysId)
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to change owner")));

/* Modify the owner --- okay to scribble on tup because it's a copy */
/*
* Modify the owner --- okay to scribble on tup because it's a
* copy
*/
opcForm->opcowner = newOwnerSysId;

simple_heap_update(rel, &tup->t_self, tup);

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.18 2004/08/29 04:12:30 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.19 2004/08/29 05:06:41 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -275,7 +275,7 @@ AlterOperatorOwner(List *name, TypeName *typeName1, TypeName *typeName2,
Oid operOid;
HeapTuple tup;
Relation rel;
Form_pg_operator oprForm;
Form_pg_operator oprForm;

rel = heap_openr(OperatorRelationName, RowExclusiveLock);

@@ -283,14 +283,14 @@ AlterOperatorOwner(List *name, TypeName *typeName1, TypeName *typeName2,
false);

tup = SearchSysCacheCopy(OPEROID,
ObjectIdGetDatum(operOid),
0, 0, 0);
ObjectIdGetDatum(operOid),
0, 0, 0);
if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "cache lookup failed for operator %u", operOid);

oprForm = (Form_pg_operator) GETSTRUCT(tup);

/*
/*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
@@ -302,7 +302,10 @@ AlterOperatorOwner(List *name, TypeName *typeName1, TypeName *typeName2,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to change owner")));

/* Modify the owner --- okay to scribble on tup because it's a copy */
/*
* Modify the owner --- okay to scribble on tup because it's a
* copy
*/
oprForm->oprowner = newOwnerSysId;

simple_heap_update(rel, &tup->t_self, tup);
@@ -314,5 +317,3 @@ AlterOperatorOwner(List *name, TypeName *typeName1, TypeName *typeName2,
heap_freetuple(tup);

}



@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.32 2004/08/29 04:12:30 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.33 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -106,10 +106,9 @@ PerformCursorOpen(DeclareCursorStmt *stmt, ParamListInfo params)

/*
* Also copy the outer portal's parameter list into the inner portal's
* memory context. We want to pass down the parameter values in case
* we had a command like
* DECLARE c CURSOR FOR SELECT ... WHERE foo = $1
* This will have been parsed using the outer parameter set and the
* memory context. We want to pass down the parameter values in case
* we had a command like DECLARE c CURSOR FOR SELECT ... WHERE foo =
* $1 This will have been parsed using the outer parameter set and the
* parameter value needs to be preserved for use when the cursor is
* executed.
*/
@@ -180,8 +179,8 @@ PerformPortalFetch(FetchStmt *stmt,
{
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_CURSOR),
errmsg("cursor \"%s\" does not exist", stmt->portalname)));
return; /* keep compiler happy */
errmsg("cursor \"%s\" does not exist", stmt->portalname)));
return; /* keep compiler happy */
}

/* Adjust dest if needed. MOVE wants destination None */
@@ -228,7 +227,7 @@ PerformPortalClose(const char *name)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_CURSOR),
errmsg("cursor \"%s\" does not exist", name)));
return; /* keep compiler happy */
return; /* keep compiler happy */
}

/*
@@ -354,8 +353,9 @@ PersistHoldablePortal(Portal portal)
MemoryContextSwitchTo(PortalContext);

/*
* Rewind the executor: we need to store the entire result set in the
* tuplestore, so that subsequent backward FETCHs can be processed.
* Rewind the executor: we need to store the entire result set in
* the tuplestore, so that subsequent backward FETCHs can be
* processed.
*/
ExecutorRewind(queryDesc);

@@ -371,15 +371,15 @@ PersistHoldablePortal(Portal portal)
/*
* Now shut down the inner executor.
*/
portal->queryDesc = NULL; /* prevent double shutdown */
portal->queryDesc = NULL; /* prevent double shutdown */
ExecutorEnd(queryDesc);

/*
* Reset the position in the result set: ideally, this could be
* implemented by just skipping straight to the tuple # that we need
* to be at, but the tuplestore API doesn't support that. So we start
* at the beginning of the tuplestore and iterate through it until we
* reach where we need to be. FIXME someday?
* implemented by just skipping straight to the tuple # that we
* need to be at, but the tuplestore API doesn't support that. So
* we start at the beginning of the tuplestore and iterate through
* it until we reach where we need to be. FIXME someday?
*/
MemoryContextSwitchTo(portal->holdContext);

@@ -389,8 +389,8 @@ PersistHoldablePortal(Portal portal)

if (portal->posOverflow) /* oops, cannot trust portalPos */
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not reposition held cursor")));
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not reposition held cursor")));

tuplestore_rescan(portal->holdStore);


@@ -10,7 +10,7 @@
* Copyright (c) 2002-2004, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.30 2004/08/29 04:12:30 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.31 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -211,7 +211,8 @@ EvaluateParams(EState *estate, List *params, List *argtypes)
int nargs = list_length(argtypes);
ParamListInfo paramLI;
List *exprstates;
ListCell *le, *la;
ListCell *le,
*la;
int i = 0;

/* Parser should have caught this error, but check for safety */
@@ -510,7 +511,7 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
}

/* Explain each query */
forboth (q, query_list, p, plan_list)
forboth(q, query_list, p, plan_list)
{
Query *query = (Query *) lfirst(q);
Plan *plan = (Plan *) lfirst(p);

@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.54 2004/08/29 04:12:30 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.55 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -101,8 +101,8 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("function %s must return type \"language_handler\"",
NameListToString(stmt->plhandler))));
errmsg("function %s must return type \"language_handler\"",
NameListToString(stmt->plhandler))));
}

/* validate the validator function */
@@ -126,12 +126,12 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)

i = 0;
namestrcpy(&langname, languageName);
values[i++] = NameGetDatum(&langname); /* lanname */
values[i++] = BoolGetDatum(true); /* lanispl */
values[i++] = BoolGetDatum(stmt->pltrusted); /* lanpltrusted */
values[i++] = ObjectIdGetDatum(procOid); /* lanplcallfoid */
values[i++] = ObjectIdGetDatum(valProcOid); /* lanvalidator */
nulls[i] = 'n'; /* lanacl */
values[i++] = NameGetDatum(&langname); /* lanname */
values[i++] = BoolGetDatum(true); /* lanispl */
values[i++] = BoolGetDatum(stmt->pltrusted); /* lanpltrusted */
values[i++] = ObjectIdGetDatum(procOid); /* lanplcallfoid */
values[i++] = ObjectIdGetDatum(valProcOid); /* lanvalidator */
nulls[i] = 'n'; /* lanacl */

rel = heap_openr(LanguageRelationName, RowExclusiveLock);


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.23 2004/08/29 04:12:30 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.24 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -103,12 +103,12 @@ CreateSchemaCommand(CreateSchemaStmt *stmt)
errdetail("The prefix \"pg_\" is reserved for system schemas.")));

/*
* Select default tablespace for schema. If not given, use zero
* which implies the database's default tablespace.
* Select default tablespace for schema. If not given, use zero which
* implies the database's default tablespace.
*/
if (stmt->tablespacename)
{
AclResult aclresult;
AclResult aclresult;

tablespaceId = get_tablespace_oid(stmt->tablespacename);
if (!OidIsValid(tablespaceId))
@@ -122,7 +122,9 @@ CreateSchemaCommand(CreateSchemaStmt *stmt)
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
stmt->tablespacename);
} else {
}
else
{
tablespaceId = InvalidOid;
/* note there is no permission check in this path */
}
@@ -316,20 +318,20 @@ AlterSchemaOwner(const char *name, AclId newOwnerSysId)
{
HeapTuple tup;
Relation rel;
Form_pg_namespace nspForm;
Form_pg_namespace nspForm;

rel = heap_openr(NamespaceRelationName, RowExclusiveLock);

tup = SearchSysCache(NAMESPACENAME,
CStringGetDatum(name),
0, 0, 0);
CStringGetDatum(name),
0, 0, 0);
if (!HeapTupleIsValid(tup))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_SCHEMA),
errmsg("schema \"%s\" does not exist", name)));
nspForm = (Form_pg_namespace) GETSTRUCT(tup);

/*
/*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
@@ -338,7 +340,7 @@ AlterSchemaOwner(const char *name, AclId newOwnerSysId)
Datum repl_val[Natts_pg_namespace];
char repl_null[Natts_pg_namespace];
char repl_repl[Natts_pg_namespace];
Acl *newAcl;
Acl *newAcl;
Datum aclDatum;
bool isNull;
HeapTuple newtuple;
@@ -377,7 +379,7 @@ AlterSchemaOwner(const char *name, AclId newOwnerSysId)

heap_freetuple(newtuple);
}


ReleaseSysCache(tup);
heap_close(rel, NoLock);
}

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.115 2004/08/29 04:12:30 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.116 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -326,7 +326,7 @@ AlterSequence(AlterSeqStmt *stmt)
memcpy(seq, &new, sizeof(FormData_pg_sequence));

/* Clear local cache so that we don't think we have cached numbers */
elm->last = new.last_value; /* last returned number */
elm->last = new.last_value; /* last returned number */
elm->cached = new.last_value; /* last cached number (forget
* cached values) */

@@ -950,26 +950,22 @@ init_params(List *options, Form_pg_sequence new, bool isInit)

/* MAXVALUE (null arg means NO MAXVALUE) */
if (max_value != NULL && max_value->arg)
{
new->max_value = defGetInt64(max_value);
}
else if (isInit || max_value != NULL)
{
if (new->increment_by > 0)
new->max_value = SEQ_MAXVALUE; /* ascending seq */
else
new->max_value = -1; /* descending seq */
new->max_value = -1; /* descending seq */
}

/* MINVALUE (null arg means NO MINVALUE) */
if (min_value != NULL && min_value->arg)
{
new->min_value = defGetInt64(min_value);
}
else if (isInit || min_value != NULL)
{
if (new->increment_by > 0)
new->min_value = 1; /* ascending seq */
new->min_value = 1; /* ascending seq */
else
new->min_value = SEQ_MINVALUE; /* descending seq */
}
@@ -1073,7 +1069,7 @@ seq_redo(XLogRecPtr lsn, XLogRecord *record)
buffer = XLogReadBuffer(true, reln, 0);
if (!BufferIsValid(buffer))
elog(PANIC, "seq_redo: can't read block 0 of rel %u/%u/%u",
xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode);
xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode);

page = (Page) BufferGetPage(buffer);


File diff suppressed because it is too large
@@ -35,7 +35,7 @@
* To allow CREATE DATABASE to give a new database a default tablespace
* that's different from the template database's default, we make the
* provision that a zero in pg_class.reltablespace means the database's
* default tablespace. Without this, CREATE DATABASE would have to go in
* default tablespace. Without this, CREATE DATABASE would have to go in
* and munge the system catalogs of the new database. This special meaning
* of zero also applies in pg_namespace.nsptablespace.
*
@@ -45,7 +45,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.8 2004/08/08 01:31:11 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.9 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -95,11 +95,11 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo)
{
#ifdef HAVE_SYMLINK
struct stat st;
char *dir;
char *dir;

/*
* The global tablespace doesn't have per-database subdirectories,
* so nothing to do for it.
* The global tablespace doesn't have per-database subdirectories, so
* nothing to do for it.
*/
if (spcNode == GLOBALTABLESPACE_OID)
return;
@@ -118,7 +118,7 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo)
* DROP TABLESPACE or TablespaceCreateDbspace is running
* concurrently. Simple reads from pg_tablespace are OK.
*/
Relation rel;
Relation rel;

if (!isRedo)
rel = heap_openr(TableSpaceRelationName, ExclusiveLock);
@@ -126,8 +126,8 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo)
rel = NULL;

/*
* Recheck to see if someone created the directory while
* we were waiting for lock.
* Recheck to see if someone created the directory while we
* were waiting for lock.
*/
if (stat(dir, &st) == 0 && S_ISDIR(st.st_mode))
{
@@ -139,8 +139,8 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo)
if (mkdir(dir, S_IRWXU) < 0)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not create directory \"%s\": %m",
dir)));
errmsg("could not create directory \"%s\": %m",
dir)));
}

/* OK to drop the exclusive lock */
@@ -165,7 +165,7 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo)
}

pfree(dir);
#endif /* HAVE_SYMLINK */
#endif /* HAVE_SYMLINK */
}

/*
@@ -179,13 +179,13 @@ void
CreateTableSpace(CreateTableSpaceStmt *stmt)
{
#ifdef HAVE_SYMLINK
Relation rel;
Datum values[Natts_pg_tablespace];
Relation rel;
Datum values[Natts_pg_tablespace];
char nulls[Natts_pg_tablespace];
HeapTuple tuple;
Oid tablespaceoid;
char *location;
char *linkloc;
char *location;
char *linkloc;
AclId ownerid;

/* validate */
@@ -196,10 +196,10 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
/* Must be super user */
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied to create tablespace \"%s\"",
stmt->tablespacename),
errhint("Must be superuser to create a tablespace.")));
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied to create tablespace \"%s\"",
stmt->tablespacename),
errhint("Must be superuser to create a tablespace.")));

/* However, the eventual owner of the tablespace need not be */
if (stmt->owner)
@@ -218,7 +218,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
if (strchr(location, '\''))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
errmsg("tablespace location may not contain single quotes")));
errmsg("tablespace location may not contain single quotes")));

/*
* Allowing relative paths seems risky
@@ -231,9 +231,9 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
errmsg("tablespace location must be an absolute path")));

/*
* Check that location isn't too long. Remember that we're going to append
* '/<dboid>/<relid>.<nnn>' (XXX but do we ever form the whole path
* explicitly? This may be overly conservative.)
* Check that location isn't too long. Remember that we're going to
* append '/<dboid>/<relid>.<nnn>' (XXX but do we ever form the whole
* path explicitly? This may be overly conservative.)
*/
if (strlen(location) >= (MAXPGPATH - 1 - 10 - 1 - 10 - 1 - 10))
ereport(ERROR,
@@ -250,12 +250,12 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable tablespace name \"%s\"",
stmt->tablespacename),
errdetail("The prefix \"pg_\" is reserved for system tablespaces.")));
errdetail("The prefix \"pg_\" is reserved for system tablespaces.")));

/*
* Check that there is no other tablespace by this name. (The
* unique index would catch this anyway, but might as well give
* a friendlier message.)
* Check that there is no other tablespace by this name. (The unique
* index would catch this anyway, but might as well give a friendlier
* message.)
*/
if (OidIsValid(get_tablespace_oid(stmt->tablespacename)))
ereport(ERROR,
@@ -293,14 +293,14 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
heap_freetuple(tuple);

/*
* Attempt to coerce target directory to safe permissions. If this
* Attempt to coerce target directory to safe permissions. If this
* fails, it doesn't exist or has the wrong owner.
*/
if (chmod(location, 0700) != 0)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not set permissions on directory \"%s\": %m",
location)));
errmsg("could not set permissions on directory \"%s\": %m",
location)));

/*
* Check the target directory is empty.
@@ -312,10 +312,10 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
location)));

/*
* Create the PG_VERSION file in the target directory. This has several
* purposes: to make sure we can write in the directory, to prevent
* someone from creating another tablespace pointing at the same
* directory (the emptiness check above will fail), and to label
* Create the PG_VERSION file in the target directory. This has
* several purposes: to make sure we can write in the directory, to
* prevent someone from creating another tablespace pointing at the
* same directory (the emptiness check above will fail), and to label
* tablespace directories by PG version.
*/
set_short_version(location);
@@ -337,11 +337,11 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)

heap_close(rel, RowExclusiveLock);

#else /* !HAVE_SYMLINK */
#else /* !HAVE_SYMLINK */
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("tablespaces are not supported on this platform")));
#endif /* HAVE_SYMLINK */
#endif /* HAVE_SYMLINK */
}

/*
@@ -353,23 +353,24 @@ void
DropTableSpace(DropTableSpaceStmt *stmt)
{
#ifdef HAVE_SYMLINK
char *tablespacename = stmt->tablespacename;
HeapScanDesc scandesc;
Relation rel;
HeapTuple tuple;
ScanKeyData entry[1];
char *location;
Oid tablespaceoid;
DIR *dirdesc;
char *tablespacename = stmt->tablespacename;
HeapScanDesc scandesc;
Relation rel;
HeapTuple tuple;
ScanKeyData entry[1];
char *location;
Oid tablespaceoid;
DIR *dirdesc;
struct dirent *de;
char *subfile;
char *subfile;

/* don't call this in a transaction block */
PreventTransactionChain((void *) stmt, "DROP TABLESPACE");

/*
* Acquire ExclusiveLock on pg_tablespace to ensure that no one else
* is trying to do DROP TABLESPACE or TablespaceCreateDbspace concurrently.
* is trying to do DROP TABLESPACE or TablespaceCreateDbspace
* concurrently.
*/
rel = heap_openr(TableSpaceRelationName, ExclusiveLock);

@@ -409,15 +410,15 @@ DropTableSpace(DropTableSpaceStmt *stmt)
/*
* Check if the tablespace still contains any files. We try to rmdir
* each per-database directory we find in it. rmdir failure implies
* there are still files in that subdirectory, so give up. (We do not
* have to worry about undoing any already completed rmdirs, since
* the next attempt to use the tablespace from that database will simply
* there are still files in that subdirectory, so give up. (We do not
* have to worry about undoing any already completed rmdirs, since the
* next attempt to use the tablespace from that database will simply
* recreate the subdirectory via TablespaceCreateDbspace.)
*
* Since we hold exclusive lock, no one else should be creating any
* fresh subdirectories in parallel. It is possible that new files
* are being created within subdirectories, though, so the rmdir
* call could fail. Worst consequence is a less friendly error message.
* Since we hold exclusive lock, no one else should be creating any fresh
* subdirectories in parallel. It is possible that new files are
* being created within subdirectories, though, so the rmdir call
* could fail. Worst consequence is a less friendly error message.
*/
dirdesc = AllocateDir(location);
if (dirdesc == NULL)
@@ -458,8 +459,11 @@ DropTableSpace(DropTableSpaceStmt *stmt)
pfree(subfile);
}
#ifdef WIN32
/* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
not in released version */

/*
* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
* not in released version
*/
if (GetLastError() == ERROR_NO_MORE_FILES)
errno = 0;
#endif
@@ -494,15 +498,15 @@ DropTableSpace(DropTableSpaceStmt *stmt)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not remove junction dir \"%s\": %m",
location)));
location)));
#endif

pfree(subfile);
pfree(location);

/*
* We have successfully destroyed the infrastructure ... there is
* now no way to roll back the DROP ... so proceed to remove the
* We have successfully destroyed the infrastructure ... there is now
* no way to roll back the DROP ... so proceed to remove the
* pg_tablespace tuple.
*/
simple_heap_delete(rel, &tuple->t_self);
@@ -511,11 +515,11 @@ DropTableSpace(DropTableSpaceStmt *stmt)

heap_close(rel, ExclusiveLock);

#else /* !HAVE_SYMLINK */
#else /* !HAVE_SYMLINK */
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("tablespaces are not supported on this platform")));
#endif /* HAVE_SYMLINK */
#endif /* HAVE_SYMLINK */
}


@@ -579,7 +583,7 @@ set_short_version(const char *path)
static bool
directory_is_empty(const char *path)
{
DIR *dirdesc;
DIR *dirdesc;
struct dirent *de;

dirdesc = AllocateDir(path);
@@ -602,8 +606,11 @@ directory_is_empty(const char *path)
return false;
}
#ifdef WIN32
/* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
not in released version */

/*
* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
* not in released version
*/
if (GetLastError() == ERROR_NO_MORE_FILES)
errno = 0;
#endif
@@ -624,11 +631,11 @@ directory_is_empty(const char *path)
Oid
get_tablespace_oid(const char *tablespacename)
{
Oid result;
Relation rel;
Oid result;
Relation rel;
HeapScanDesc scandesc;
HeapTuple tuple;
ScanKeyData entry[1];
ScanKeyData entry[1];

/* Search pg_tablespace */
rel = heap_openr(TableSpaceRelationName, AccessShareLock);
@@ -645,8 +652,8 @@ get_tablespace_oid(const char *tablespacename)
else
result = InvalidOid;

heap_endscan(scandesc);
heap_close(rel, AccessShareLock);
heap_endscan(scandesc);
heap_close(rel, AccessShareLock);

return result;
}
@@ -659,11 +666,11 @@ get_tablespace_oid(const char *tablespacename)
char *
get_tablespace_name(Oid spc_oid)
{
char *result;
Relation rel;
char *result;
Relation rel;
HeapScanDesc scandesc;
HeapTuple tuple;
ScanKeyData entry[1];
ScanKeyData entry[1];

/* Search pg_tablespace */
rel = heap_openr(TableSpaceRelationName, AccessShareLock);
@@ -681,8 +688,8 @@ get_tablespace_name(Oid spc_oid)
else
result = NULL;

heap_endscan(scandesc);
heap_close(rel, AccessShareLock);
heap_endscan(scandesc);
heap_close(rel, AccessShareLock);

return result;
}
@@ -693,8 +700,8 @@ get_tablespace_name(Oid spc_oid)
void
RenameTableSpace(const char *oldname, const char *newname)
{
Relation rel;
ScanKeyData entry[1];
Relation rel;
ScanKeyData entry[1];
HeapScanDesc scan;
HeapTuple tup;
HeapTuple newtuple;
@@ -729,7 +736,7 @@ RenameTableSpace(const char *oldname, const char *newname)
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable tablespace name \"%s\"", newname),
errdetail("The prefix \"pg_\" is reserved for system tablespaces.")));
errdetail("The prefix \"pg_\" is reserved for system tablespaces.")));

/* Make sure the new name doesn't exist */
ScanKeyInit(&entry[0],
@@ -743,7 +750,7 @@ RenameTableSpace(const char *oldname, const char *newname)
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("tablespace \"%s\" already exists",
newname)));


heap_endscan(scan);

/* OK, update the entry */
@@ -761,8 +768,8 @@ RenameTableSpace(const char *oldname, const char *newname)
void
AlterTableSpaceOwner(const char *name, AclId newOwnerSysId)
{
Relation rel;
ScanKeyData entry[1];
Relation rel;
ScanKeyData entry[1];
HeapScanDesc scandesc;
Form_pg_tablespace spcForm;
HeapTuple tup;
@@ -783,7 +790,7 @@ AlterTableSpaceOwner(const char *name, AclId newOwnerSysId)

spcForm = (Form_pg_tablespace) GETSTRUCT(tup);

/*
/*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
@@ -792,7 +799,7 @@ AlterTableSpaceOwner(const char *name, AclId newOwnerSysId)
Datum repl_val[Natts_pg_tablespace];
char repl_null[Natts_pg_tablespace];
char repl_repl[Natts_pg_tablespace];
Acl *newAcl;
Acl *newAcl;
Datum aclDatum;
bool isNull;
HeapTuple newtuple;
@@ -814,9 +821,9 @@ AlterTableSpaceOwner(const char *name, AclId newOwnerSysId)
* necessary when the ACL is non-null.
*/
aclDatum = heap_getattr(tup,
Anum_pg_tablespace_spcacl,
RelationGetDescr(rel),
&isNull);
Anum_pg_tablespace_spcacl,
RelationGetDescr(rel),
&isNull);
if (!isNull)
{
newAcl = aclnewowner(DatumGetAclP(aclDatum),

@@ -7,7 +7,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.167 2004/08/29 04:12:30 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.168 2004/08/29 05:06:41 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -480,8 +480,8 @@ DropTrigger(Oid relid, const char *trigname, DropBehavior behavior)
|
||||
if (!HeapTupleIsValid(tup))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_UNDEFINED_OBJECT),
|
||||
errmsg("trigger \"%s\" for table \"%s\" does not exist",
|
||||
trigname, get_rel_name(relid))));
|
||||
errmsg("trigger \"%s\" for table \"%s\" does not exist",
|
||||
trigname, get_rel_name(relid))));
|
||||
|
||||
if (!pg_class_ownercheck(relid, GetUserId()))
|
||||
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
|
||||
@@ -694,8 +694,8 @@ renametrig(Oid relid,
|
||||
{
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_UNDEFINED_OBJECT),
|
||||
errmsg("trigger \"%s\" for table \"%s\" does not exist",
|
||||
oldname, RelationGetRelationName(targetrel))));
|
||||
errmsg("trigger \"%s\" for table \"%s\" does not exist",
|
||||
oldname, RelationGetRelationName(targetrel))));
|
||||
}
|
||||
|
||||
systable_endscan(tgscan);
|
||||
@@ -1638,7 +1638,7 @@ ltrmark:;
|
||||
* Deferred trigger stuff
|
||||
*
|
||||
* The DeferredTriggersData struct holds data about pending deferred
|
||||
* trigger events during the current transaction tree. The struct and
|
||||
* trigger events during the current transaction tree. The struct and
|
||||
* most of its subsidiary data are kept in TopTransactionContext; however
|
||||
* the individual event records are kept in CurTransactionContext, so that
|
||||
* they will easily go away during subtransaction abort.
|
||||
@@ -1670,7 +1670,7 @@ ltrmark:;
|
||||
* saves a copy, which we use to restore the state if we abort.
|
||||
*
|
||||
* numpushed and numalloc keep control of allocation and storage in the above
|
||||
* stacks. numpushed is essentially the current subtransaction nesting depth.
|
||||
* stacks. numpushed is essentially the current subtransaction nesting depth.
|
||||
*
|
||||
* XXX We need to be able to save the per-event data in a file if it grows too
|
||||
* large.
|
||||
@@ -1723,11 +1723,11 @@ typedef struct DeferredTriggerStatusData *DeferredTriggerStatus;
*/
typedef struct DeferredTriggerStateData
{
bool all_isset;
bool all_isdeferred;
int numstates; /* number of trigstates[] entries in use */
int numalloc; /* allocated size of trigstates[] */
DeferredTriggerStatusData trigstates[1]; /* VARIABLE LENGTH ARRAY */
bool all_isset;
bool all_isdeferred;
int numstates; /* number of trigstates[] entries in use */
int numalloc; /* allocated size of trigstates[] */
DeferredTriggerStatusData trigstates[1]; /* VARIABLE LENGTH ARRAY */
} DeferredTriggerStateData;

typedef DeferredTriggerStateData *DeferredTriggerState;
@@ -1735,15 +1735,15 @@ typedef DeferredTriggerStateData *DeferredTriggerState;
/* Per-transaction data */
typedef struct DeferredTriggersData
{
DeferredTriggerState state;
DeferredTriggerEvent events;
DeferredTriggerEvent tail_thisxact;
DeferredTriggerEvent events_imm;
DeferredTriggerEvent *tail_stack;
DeferredTriggerEvent *imm_stack;
DeferredTriggerState *state_stack;
int numpushed;
int numalloc;
DeferredTriggerState state;
DeferredTriggerEvent events;
DeferredTriggerEvent tail_thisxact;
DeferredTriggerEvent events_imm;
DeferredTriggerEvent *tail_stack;
DeferredTriggerEvent *imm_stack;
DeferredTriggerState *state_stack;
int numpushed;
int numalloc;
} DeferredTriggersData;

typedef DeferredTriggersData *DeferredTriggers;
@@ -1757,7 +1757,7 @@ static void DeferredTriggerExecute(DeferredTriggerEvent event, int itemno,
static DeferredTriggerState DeferredTriggerStateCreate(int numalloc);
static DeferredTriggerState DeferredTriggerStateCopy(DeferredTriggerState state);
static DeferredTriggerState DeferredTriggerStateAddItem(DeferredTriggerState state,
Oid tgoid, bool tgisdeferred);
Oid tgoid, bool tgisdeferred);


/* ----------
@@ -1770,8 +1770,8 @@ static DeferredTriggerState DeferredTriggerStateAddItem(DeferredTriggerState sta
static bool
deferredTriggerCheckState(Oid tgoid, int32 itemstate)
{
bool tgisdeferred;
int i;
bool tgisdeferred;
int i;

/*
* For not-deferrable triggers (i.e. normal AFTER ROW triggers and
@@ -1798,7 +1798,8 @@ deferredTriggerCheckState(Oid tgoid, int32 itemstate)

/*
* No ALL state known either, remember the default state as the
* current and return that. (XXX why do we bother making a state entry?)
* current and return that. (XXX why do we bother making a state
* entry?)
*/
tgisdeferred = ((itemstate & TRIGGER_DEFERRED_INITDEFERRED) != 0);
deferredTriggers->state =
@@ -1982,8 +1983,8 @@ deferredTriggerInvokeEvents(bool immediate_only)

/*
* If immediate_only is true, then the only events that could need
* firing are those since events_imm. (But if
* events_imm is NULL, we must scan the entire list.)
* firing are those since events_imm. (But if events_imm is NULL, we
* must scan the entire list.)
*/
if (immediate_only && deferredTriggers->events_imm != NULL)
{
@@ -2003,13 +2004,13 @@ deferredTriggerInvokeEvents(bool immediate_only)
int i;

/*
* Skip executing cancelled events, and events done by transactions
* that are not aborted.
* Skip executing cancelled events, and events done by
* transactions that are not aborted.
*/
if (!(event->dte_event & TRIGGER_DEFERRED_CANCELED) ||
(event->dte_event & TRIGGER_DEFERRED_DONE &&
TransactionIdIsValid(event->dte_done_xid) &&
!TransactionIdDidAbort(event->dte_done_xid)))
(event->dte_event & TRIGGER_DEFERRED_DONE &&
TransactionIdIsValid(event->dte_done_xid) &&
!TransactionIdDidAbort(event->dte_done_xid)))
{
MemoryContextReset(per_tuple_context);

@@ -2019,8 +2020,8 @@ deferredTriggerInvokeEvents(bool immediate_only)
for (i = 0; i < event->dte_n_items; i++)
{
if (event->dte_item[i].dti_state & TRIGGER_DEFERRED_DONE &&
TransactionIdIsValid(event->dte_item[i].dti_done_xid) &&
!(TransactionIdDidAbort(event->dte_item[i].dti_done_xid)))
TransactionIdIsValid(event->dte_item[i].dti_done_xid) &&
!(TransactionIdDidAbort(event->dte_item[i].dti_done_xid)))
continue;

/*
@@ -2097,8 +2098,8 @@ deferredTriggerInvokeEvents(bool immediate_only)
{
/*
* We can drop an item if it's done, but only if we're not
* inside a subtransaction because it could abort later on.
* We will want to check the item again if it does.
* inside a subtransaction because it could abort later on. We
* will want to check the item again if it does.
*/
if (immediate_only && !IsSubTransaction())
{
@@ -2209,8 +2210,8 @@ DeferredTriggerEndXact(void)
/*
* Forget everything we know about deferred triggers.
*
* Since all the info is in TopTransactionContext or children thereof,
* we need do nothing special to reclaim memory.
* Since all the info is in TopTransactionContext or children thereof, we
* need do nothing special to reclaim memory.
*/
deferredTriggers = NULL;
}
@@ -2236,8 +2237,8 @@ DeferredTriggerAbortXact(void)
/*
* Forget everything we know about deferred triggers.
*
* Since all the info is in TopTransactionContext or children thereof,
* we need do nothing special to reclaim memory.
* Since all the info is in TopTransactionContext or children thereof, we
* need do nothing special to reclaim memory.
*/
deferredTriggers = NULL;
}
@@ -2285,13 +2286,13 @@ DeferredTriggerBeginSubXact(void)

deferredTriggers->tail_stack = (DeferredTriggerEvent *)
repalloc(deferredTriggers->tail_stack,
deferredTriggers->numalloc * sizeof(DeferredTriggerEvent));
deferredTriggers->numalloc * sizeof(DeferredTriggerEvent));
deferredTriggers->imm_stack = (DeferredTriggerEvent *)
repalloc(deferredTriggers->imm_stack,
deferredTriggers->numalloc * sizeof(DeferredTriggerEvent));
deferredTriggers->numalloc * sizeof(DeferredTriggerEvent));
deferredTriggers->state_stack = (DeferredTriggerState *)
repalloc(deferredTriggers->state_stack,
deferredTriggers->numalloc * sizeof(DeferredTriggerState));
deferredTriggers->numalloc * sizeof(DeferredTriggerState));
}
}

@@ -2358,8 +2359,8 @@ DeferredTriggerEndSubXact(bool isCommit)
deferredTriggers->tail_thisxact->dte_next = NULL;

/*
* We don't need to free the items, since the CurTransactionContext
* will be reset shortly.
* We don't need to free the items, since the
* CurTransactionContext will be reset shortly.
*/

/*
@@ -2393,7 +2394,7 @@ DeferredTriggerStateCreate(int numalloc)
state = (DeferredTriggerState)
MemoryContextAllocZero(TopTransactionContext,
sizeof(DeferredTriggerStateData) +
(numalloc - 1) * sizeof(DeferredTriggerStatusData));
(numalloc - 1) *sizeof(DeferredTriggerStatusData));

state->numalloc = numalloc;

@@ -2429,13 +2430,13 @@ DeferredTriggerStateAddItem(DeferredTriggerState state,
{
if (state->numstates >= state->numalloc)
{
int newalloc = state->numalloc * 2;
int newalloc = state->numalloc * 2;

newalloc = Max(newalloc, 8); /* in case original has size 0 */
newalloc = Max(newalloc, 8); /* in case original has size 0 */
state = (DeferredTriggerState)
repalloc(state,
sizeof(DeferredTriggerStateData) +
(newalloc - 1) * sizeof(DeferredTriggerStatusData));
(newalloc - 1) *sizeof(DeferredTriggerStatusData));
state->numalloc = newalloc;
Assert(state->numstates < state->numalloc);
}
@@ -2463,8 +2464,9 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
return;

/*
* If in a subtransaction, and we didn't save the current state already,
* save it so it can be restored if the subtransaction aborts.
* If in a subtransaction, and we didn't save the current state
* already, save it so it can be restored if the subtransaction
* aborts.
*/
if (deferredTriggers->numpushed > 0 &&
deferredTriggers->state_stack[deferredTriggers->numpushed - 1] == NULL)
@@ -2686,7 +2688,7 @@ DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger,
return;

/*
* Create a new event. We use the CurTransactionContext so the event
* Create a new event. We use the CurTransactionContext so the event
* will automatically go away if the subtransaction aborts.
*/
oldcxt = MemoryContextSwitchTo(CurTransactionContext);

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.62 2004/08/29 04:12:30 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.63 2004/08/29 05:06:41 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -302,8 +302,8 @@ DefineType(List *names, List *parameters)
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("type output function %s must return type \"cstring\"",
NameListToString(outputName))));
errmsg("type output function %s must return type \"cstring\"",
NameListToString(outputName))));
}
if (receiveOid)
{
@@ -311,8 +311,8 @@ DefineType(List *names, List *parameters)
if (resulttype != typoid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("type receive function %s must return type %s",
NameListToString(receiveName), typeName)));
errmsg("type receive function %s must return type %s",
NameListToString(receiveName), typeName)));
}
if (sendOid)
{
@@ -320,13 +320,14 @@ DefineType(List *names, List *parameters)
if (resulttype != BYTEAOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("type send function %s must return type \"bytea\"",
NameListToString(sendName))));
errmsg("type send function %s must return type \"bytea\"",
NameListToString(sendName))));
}

/*
* Convert analysis function proc name to an OID. If no analysis function
* is specified, we'll use zero to select the built-in default algorithm.
* Convert analysis function proc name to an OID. If no analysis
* function is specified, we'll use zero to select the built-in
* default algorithm.
*/
if (analyzeName)
analyzeOid = findTypeAnalyzeFunction(analyzeName, typoid);
@@ -691,7 +692,7 @@ DefineDomain(CreateDomainStmt *stmt)
case CONSTR_UNIQUE:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("unique constraints not possible for domains")));
errmsg("unique constraints not possible for domains")));
break;

case CONSTR_PRIMARY:
@@ -932,8 +933,8 @@ findTypeOutputFunction(List *procname, Oid typeOid)
* arguments (data value, element OID).
*
* For backwards compatibility we allow OPAQUE in place of the actual
* type name; if we see this, we issue a warning and fix up the pg_proc
* entry.
* type name; if we see this, we issue a warning and fix up the
* pg_proc entry.
*/
MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid));

@@ -967,8 +968,8 @@ findTypeOutputFunction(List *procname, Oid typeOid)
{
/* Found, but must complain and fix the pg_proc entry */
ereport(WARNING,
(errmsg("changing argument type of function %s from \"opaque\" to %s",
NameListToString(procname), format_type_be(typeOid))));
(errmsg("changing argument type of function %s from \"opaque\" to %s",
NameListToString(procname), format_type_be(typeOid))));
SetFunctionArgType(procOid, 0, typeOid);

/*
@@ -1062,7 +1063,8 @@ findTypeAnalyzeFunction(List *procname, Oid typeOid)
Oid procOid;

/*
* Analyze functions always take one INTERNAL argument and return bool.
* Analyze functions always take one INTERNAL argument and return
* bool.
*/
MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid));

@@ -1078,8 +1080,8 @@ findTypeAnalyzeFunction(List *procname, Oid typeOid)
if (get_func_rettype(procOid) != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("type analyze function %s must return type \"boolean\"",
NameListToString(procname))));
errmsg("type analyze function %s must return type \"boolean\"",
NameListToString(procname))));

return procOid;
}
@@ -1110,8 +1112,8 @@ DefineCompositeType(const RangeVar *typevar, List *coldeflist)
errmsg("composite type must have at least one attribute")));

/*
* now set the parameters for keys/inheritance etc. All of these
* are uninteresting for composite types...
* now set the parameters for keys/inheritance etc. All of these are
* uninteresting for composite types...
*/
createStmt->relation = (RangeVar *) typevar;
createStmt->tableElts = coldeflist;
@@ -1337,8 +1339,8 @@ AlterDomainNotNull(List *names, bool notNull)
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("column \"%s\" of table \"%s\" contains null values",
NameStr(tupdesc->attrs[attnum - 1]->attname),
RelationGetRelationName(testrel))));
NameStr(tupdesc->attrs[attnum - 1]->attname),
RelationGetRelationName(testrel))));
}
}
heap_endscan(scan);
@@ -1499,7 +1501,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
if (IsA(newConstraint, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("foreign key constraints not possible for domains")));
errmsg("foreign key constraints not possible for domains")));

/* otherwise it should be a plain Constraint */
if (!IsA(newConstraint, Constraint))
@@ -1517,13 +1519,13 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
case CONSTR_UNIQUE:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("unique constraints not possible for domains")));
errmsg("unique constraints not possible for domains")));
break;

case CONSTR_PRIMARY:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("primary key constraints not possible for domains")));
errmsg("primary key constraints not possible for domains")));
break;

case CONSTR_ATTR_DEFERRABLE:
@@ -1604,7 +1606,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
ereport(ERROR,
(errcode(ERRCODE_CHECK_VIOLATION),
errmsg("column \"%s\" of table \"%s\" contains values that violate the new constraint",
NameStr(tupdesc->attrs[attnum - 1]->attname),
NameStr(tupdesc->attrs[attnum - 1]->attname),
RelationGetRelationName(testrel))));
}

@@ -2078,9 +2080,9 @@ AlterTypeOwner(List *names, AclId newOwnerSysId)
typTup = (Form_pg_type) GETSTRUCT(tup);

/*
* If it's a composite type, we need to check that it really is a
* free-standing composite type, and not a table's underlying type.
* We want people to use ALTER TABLE not ALTER TYPE for that case.
* If it's a composite type, we need to check that it really is a
* free-standing composite type, and not a table's underlying type. We
* want people to use ALTER TABLE not ALTER TYPE for that case.
*/
if (typTup->typtype == 'c' && get_rel_relkind(typTup->typrelid) != 'c')
ereport(ERROR,
@@ -2088,7 +2090,7 @@ AlterTypeOwner(List *names, AclId newOwnerSysId)
errmsg("\"%s\" is a table's row type",
TypeNameToString(typename))));

/*
/*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
@@ -2100,7 +2102,10 @@ AlterTypeOwner(List *names, AclId newOwnerSysId)
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to change owner")));

/* Modify the owner --- okay to scribble on typTup because it's a copy */
/*
* Modify the owner --- okay to scribble on typTup because it's a
* copy
*/
typTup->typowner = newOwnerSysId;

simple_heap_update(rel, &tup->t_self, tup);

@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.143 2004/08/29 04:12:30 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.144 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -46,10 +46,10 @@ extern bool Password_encryption;

/*
* The need-to-update-files flags are a pair of TransactionIds that show what
* level of the transaction tree requested the update. To register an update,
* level of the transaction tree requested the update. To register an update,
* the transaction saves its own TransactionId in the flag, unless the value
* was already set to a valid TransactionId. If it aborts and the value is its
* TransactionId, it resets the value to InvalidTransactionId. If it commits,
* TransactionId, it resets the value to InvalidTransactionId. If it commits,
* it changes the value to its parent's TransactionId. This way the value is
* propagated up to the topmost transaction, which will update the files if a
* valid TransactionId is detected.
@@ -169,7 +169,7 @@ write_group_file(Relation grel)
if (fp == NULL)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not write to temporary file \"%s\": %m", tempname)));
errmsg("could not write to temporary file \"%s\": %m", tempname)));

/*
* Read pg_group and write the file. Note we use SnapshotSelf to
@@ -316,7 +316,7 @@ write_user_file(Relation urel)
if (fp == NULL)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not write to temporary file \"%s\": %m", tempname)));
errmsg("could not write to temporary file \"%s\": %m", tempname)));

/*
* Read pg_shadow and write the file. Note we use SnapshotSelf to
@@ -1009,7 +1009,7 @@ AlterUserSet(AlterUserSetStmt *stmt)
errmsg("user \"%s\" does not exist", stmt->user)));

if (!(superuser() ||
((Form_pg_shadow) GETSTRUCT(oldtuple))->usesysid == GetUserId()))
((Form_pg_shadow) GETSTRUCT(oldtuple))->usesysid == GetUserId()))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied")));
@@ -1216,14 +1216,14 @@ RenameUser(const char *oldname, const char *newname)
char repl_null[Natts_pg_shadow];
char repl_repl[Natts_pg_shadow];
int i;


/* ExclusiveLock because we need to update the password file */
rel = heap_openr(ShadowRelationName, ExclusiveLock);
dsc = RelationGetDescr(rel);

oldtuple = SearchSysCache(SHADOWNAME,
CStringGetDatum(oldname),
0, 0, 0);
CStringGetDatum(oldname),
0, 0, 0);
if (!HeapTupleIsValid(oldtuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
@@ -1259,7 +1259,7 @@ RenameUser(const char *oldname, const char *newname)

repl_repl[Anum_pg_shadow_usename - 1] = 'r';
repl_val[Anum_pg_shadow_usename - 1] = DirectFunctionCall1(namein,
CStringGetDatum(newname));
CStringGetDatum(newname));
repl_null[Anum_pg_shadow_usename - 1] = ' ';

datum = heap_getattr(oldtuple, Anum_pg_shadow_passwd, dsc, &isnull);
@@ -1269,14 +1269,14 @@ RenameUser(const char *oldname, const char *newname)
/* MD5 uses the username as salt, so just clear it on a rename */
repl_repl[Anum_pg_shadow_passwd - 1] = 'r';
repl_null[Anum_pg_shadow_passwd - 1] = 'n';


ereport(NOTICE,
(errmsg("MD5 password cleared because of user rename")));
(errmsg("MD5 password cleared because of user rename")));
}


newtuple = heap_modifytuple(oldtuple, rel, repl_val, repl_null, repl_repl);
simple_heap_update(rel, &oldtuple->t_self, newtuple);


CatalogUpdateIndexes(rel, newtuple);

ReleaseSysCache(oldtuple);

@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.288 2004/08/29 04:12:30 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.289 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -106,7 +106,7 @@ typedef struct VRelStats
* As these variables always appear together, we put them into one struct
* and pull initialization and cleanup into separate routines.
* ExecContext is used by repair_frag() and move_xxx_tuple(). More
* accurately: It is *used* only in move_xxx_tuple(), but because this
* accurately: It is *used* only in move_xxx_tuple(), but because this
* routine is called many times, we initialize the struct just once in
* repair_frag() and pass it on to move_xxx_tuple().
*/
@@ -131,9 +131,9 @@ ExecContext_Init(ExecContext ec, Relation rel)
ec->estate = CreateExecutorState();

ec->resultRelInfo = makeNode(ResultRelInfo);
ec->resultRelInfo->ri_RangeTableIndex = 1; /* dummy */
ec->resultRelInfo->ri_RangeTableIndex = 1; /* dummy */
ec->resultRelInfo->ri_RelationDesc = rel;
ec->resultRelInfo->ri_TrigDesc = NULL; /* we don't fire triggers */
ec->resultRelInfo->ri_TrigDesc = NULL; /* we don't fire triggers */

ExecOpenIndices(ec->resultRelInfo);

@@ -154,6 +154,7 @@ ExecContext_Finish(ExecContext ec)
ExecCloseIndices(ec->resultRelInfo);
FreeExecutorState(ec->estate);
}

/*
* End of ExecContext Implementation
*----------------------------------------------------------------------
@@ -182,16 +183,16 @@ static void repair_frag(VRelStats *vacrelstats, Relation onerel,
VacPageList vacuum_pages, VacPageList fraged_pages,
int nindexes, Relation *Irel);
static void move_chain_tuple(Relation rel,
Buffer old_buf, Page old_page, HeapTuple old_tup,
Buffer dst_buf, Page dst_page, VacPage dst_vacpage,
ExecContext ec, ItemPointer ctid, bool cleanVpd);
Buffer old_buf, Page old_page, HeapTuple old_tup,
Buffer dst_buf, Page dst_page, VacPage dst_vacpage,
ExecContext ec, ItemPointer ctid, bool cleanVpd);
static void move_plain_tuple(Relation rel,
Buffer old_buf, Page old_page, HeapTuple old_tup,
Buffer dst_buf, Page dst_page, VacPage dst_vacpage,
ExecContext ec);
Buffer old_buf, Page old_page, HeapTuple old_tup,
Buffer dst_buf, Page dst_page, VacPage dst_vacpage,
ExecContext ec);
static void update_hint_bits(Relation rel, VacPageList fraged_pages,
int num_fraged_pages, BlockNumber last_move_dest_block,
int num_moved);
int num_fraged_pages, BlockNumber last_move_dest_block,
int num_moved);
static void vacuum_heap(VRelStats *vacrelstats, Relation onerel,
VacPageList vacpagelist);
static void vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage);
@@ -248,11 +249,11 @@ vacuum(VacuumStmt *vacstmt)
* Furthermore, the forced commit that occurs before truncating the
* relation's file would have the effect of committing the rest of the
* user's transaction too, which would certainly not be the desired
* behavior. (This only applies to VACUUM FULL, though. We could
* in theory run lazy VACUUM inside a transaction block, but we choose
* to disallow that case because we'd rather commit as soon as possible
* after finishing the vacuum. This is mainly so that we can let go the
* AccessExclusiveLock that we may be holding.)
* behavior. (This only applies to VACUUM FULL, though. We could in
* theory run lazy VACUUM inside a transaction block, but we choose to
* disallow that case because we'd rather commit as soon as possible
* after finishing the vacuum. This is mainly so that we can let go
* the AccessExclusiveLock that we may be holding.)
*
* ANALYZE (without VACUUM) can run either way.
*/
@@ -262,9 +263,7 @@ vacuum(VacuumStmt *vacstmt)
in_outer_xact = false;
}
else
{
in_outer_xact = IsInTransactionChain((void *) vacstmt);
}

/*
* Send info about dead objects to the statistics collector
@@ -296,22 +295,21 @@ vacuum(VacuumStmt *vacstmt)
/*
* It's a database-wide VACUUM.
*
* Compute the initially applicable OldestXmin and FreezeLimit
* XIDs, so that we can record these values at the end of the
* VACUUM. Note that individual tables may well be processed
* with newer values, but we can guarantee that no
* (non-shared) relations are processed with older ones.
* Compute the initially applicable OldestXmin and FreezeLimit XIDs,
* so that we can record these values at the end of the VACUUM.
* Note that individual tables may well be processed with newer
* values, but we can guarantee that no (non-shared) relations are
* processed with older ones.
*
* It is okay to record non-shared values in pg_database, even
* though we may vacuum shared relations with older cutoffs,
* because only the minimum of the values present in
* pg_database matters. We can be sure that shared relations
* have at some time been vacuumed with cutoffs no worse than
* the global minimum; for, if there is a backend in some
* other DB with xmin = OLDXMIN that's determining the cutoff
* with which we vacuum shared relations, it is not possible
* for that database to have a cutoff newer than OLDXMIN
* recorded in pg_database.
* It is okay to record non-shared values in pg_database, even though
* we may vacuum shared relations with older cutoffs, because only
* the minimum of the values present in pg_database matters. We
* can be sure that shared relations have at some time been
* vacuumed with cutoffs no worse than the global minimum; for, if
* there is a backend in some other DB with xmin = OLDXMIN that's
* determining the cutoff with which we vacuum shared relations,
* it is not possible for that database to have a cutoff newer
* than OLDXMIN recorded in pg_database.
*/
vacuum_set_xid_limits(vacstmt, false,
&initialOldestXmin,
@@ -321,8 +319,8 @@ vacuum(VacuumStmt *vacstmt)
/*
* Decide whether we need to start/commit our own transactions.
*
* For VACUUM (with or without ANALYZE): always do so, so that we
* can release locks as soon as possible. (We could possibly use the
* For VACUUM (with or without ANALYZE): always do so, so that we can
* release locks as soon as possible. (We could possibly use the
* outer transaction for a one-table VACUUM, but handling TOAST tables
* would be problematic.)
*
@@ -333,9 +331,7 @@ vacuum(VacuumStmt *vacstmt)
* locks sooner.
*/
if (vacstmt->vacuum)
{
use_own_xacts = true;
}
else
{
Assert(vacstmt->analyze);
@@ -359,10 +355,10 @@ vacuum(VacuumStmt *vacstmt)
ALLOCSET_DEFAULT_MAXSIZE);

/*
* vacuum_rel expects to be entered with no transaction active; it will
* start and commit its own transaction. But we are called by an SQL
* command, and so we are executing inside a transaction already. We
* commit the transaction started in PostgresMain() here, and start
* vacuum_rel expects to be entered with no transaction active; it
* will start and commit its own transaction. But we are called by an
* SQL command, and so we are executing inside a transaction already.
* We commit the transaction started in PostgresMain() here, and start
* another one before exiting to match the commit waiting for us back
* in PostgresMain().
*/
@@ -390,24 +386,24 @@ vacuum(VacuumStmt *vacstmt)
if (vacstmt->vacuum)
{
if (!vacuum_rel(relid, vacstmt, RELKIND_RELATION))
all_rels = false; /* forget about updating dbstats */
all_rels = false; /* forget about updating dbstats */
}
if (vacstmt->analyze)
{
MemoryContext old_context = NULL;

/*
* If using separate xacts, start one for analyze. Otherwise,
* we can use the outer transaction, but we still need to call
* analyze_rel in a memory context that will be cleaned up on
* return (else we leak memory while processing multiple
* tables).
* If using separate xacts, start one for analyze.
* Otherwise, we can use the outer transaction, but we
* still need to call analyze_rel in a memory context that
* will be cleaned up on return (else we leak memory while
* processing multiple tables).
*/
if (use_own_xacts)
{
StartTransactionCommand();
SetQuerySnapshot(); /* might be needed for functions
* in indexes */
SetQuerySnapshot(); /* might be needed for functions
* in indexes */
}
else
old_context = MemoryContextSwitchTo(anl_context);
@@ -873,8 +869,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
* indexes */

/*
* Tell the cache replacement strategy that vacuum is causing
* all following IO
* Tell the cache replacement strategy that vacuum is causing all
* following IO
*/
StrategyHintVacuum(true);

@@ -932,9 +928,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
}

/*
* Check that it's a plain table; we used to do this in
* get_rel_oids() but seems safer to check after we've locked the
* relation.
* Check that it's a plain table; we used to do this in get_rel_oids()
* but seems safer to check after we've locked the relation.
*/
if (onerel->rd_rel->relkind != expected_relkind)
{
@@ -1201,7 +1196,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,

if (PageIsNew(page))
{
VacPage vacpagecopy;
VacPage vacpagecopy;

ereport(WARNING,
(errmsg("relation \"%s\" page %u is uninitialized --- fixing",
@@ -1220,7 +1215,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,

if (PageIsEmpty(page))
{
VacPage vacpagecopy;
VacPage vacpagecopy;

vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
free_space += vacpage->free;
@@ -1424,7 +1419,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,

if (do_reap || do_frag)
{
VacPage vacpagecopy = copy_vac_page(vacpage);
VacPage vacpagecopy = copy_vac_page(vacpage);

if (do_reap)
vpage_insert(vacuum_pages, vacpagecopy);
if (do_frag)
@@ -1504,9 +1500,9 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
RelationGetRelationName(onerel),
tups_vacuumed, num_tuples, nblocks),
errdetail("%.0f dead row versions cannot be removed yet.\n"
"Nonremovable row versions range from %lu to %lu bytes long.\n"
"Nonremovable row versions range from %lu to %lu bytes long.\n"
"There were %.0f unused item pointers.\n"
"Total free space (including removable row versions) is %.0f bytes.\n"
"Total free space (including removable row versions) is %.0f bytes.\n"
"%u pages are or will become empty, including %u at the end of the table.\n"
"%u pages containing %.0f free bytes are potential move destinations.\n"
"%s",
@@ -1544,7 +1540,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
BlockNumber last_move_dest_block = 0,
last_vacuum_block;
Page dst_page = NULL;
ExecContextData ec;
ExecContextData ec;
VacPageListData Nvacpagelist;
VacPage dst_vacpage = NULL,
last_vacuum_page,
@@ -1595,13 +1591,13 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
blkno > last_move_dest_block;
blkno--)
{
Buffer buf;
Page page;
OffsetNumber offnum,
maxoff;
bool isempty,
dowrite,
chain_tuple_moved;
Buffer buf;
Page page;
OffsetNumber offnum,
maxoff;
bool isempty,
dowrite,
chain_tuple_moved;

vacuum_delay_point();

@@ -1678,9 +1674,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
offnum <= maxoff;
offnum = OffsetNumberNext(offnum))
{
Size tuple_len;
HeapTupleData tuple;
ItemId itemid = PageGetItemId(page, offnum);
Size tuple_len;
HeapTupleData tuple;
ItemId itemid = PageGetItemId(page, offnum);

if (!ItemIdIsUsed(itemid))
continue;
@@ -1693,29 +1689,29 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* VACUUM FULL has an exclusive lock on the relation. So
* normally no other transaction can have pending INSERTs or
* DELETEs in this relation. A tuple is either
* (a) a tuple in a system catalog, inserted or deleted by
* a not yet committed transaction or
* (b) dead (XMIN_INVALID or XMAX_COMMITTED) or
* (c) inserted by a committed xact (XMIN_COMMITTED) or
* (d) moved by the currently running VACUUM.
* In case (a) we wouldn't be in repair_frag() at all.
* DELETEs in this relation. A tuple is either (a) a tuple in
* a system catalog, inserted or deleted by a not yet
* committed transaction or (b) dead (XMIN_INVALID or
* XMAX_COMMITTED) or (c) inserted by a committed xact
* (XMIN_COMMITTED) or (d) moved by the currently running
* VACUUM. In case (a) we wouldn't be in repair_frag() at all.
* In case (b) we cannot be here, because scan_heap() has
* already marked the item as unused, see continue above.
* Case (c) is what normally is to be expected.
* Case (d) is only possible, if a whole tuple chain has been
* moved while processing this or a higher numbered block.
* already marked the item as unused, see continue above. Case
* (c) is what normally is to be expected. Case (d) is only
* possible, if a whole tuple chain has been moved while
* processing this or a higher numbered block.
*/
if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
{
/*
* There cannot be another concurrently running VACUUM. If
* the tuple had been moved in by a previous VACUUM, the
* visibility check would have set XMIN_COMMITTED. If the
* tuple had been moved in by the currently running VACUUM,
* the loop would have been terminated. We had
* There cannot be another concurrently running VACUUM.
* If the tuple had been moved in by a previous VACUUM,
* the visibility check would have set XMIN_COMMITTED. If
* the tuple had been moved in by the currently running
* VACUUM, the loop would have been terminated. We had
* elog(ERROR, ...) here, but as we are testing for a
* can't-happen condition, Assert() seems more appropriate.
* can't-happen condition, Assert() seems more
* appropriate.
*/
Assert(!(tuple.t_data->t_infomask & HEAP_MOVED_IN));

@@ -1725,6 +1721,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* moved while cleaning this page or some previous one.
*/
Assert(tuple.t_data->t_infomask & HEAP_MOVED_OFF);

/*
* MOVED_OFF by another VACUUM would have caused the
* visibility check to set XMIN_COMMITTED or XMIN_INVALID.
@@ -1734,16 +1731,15 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/* Can't we Assert(keep_tuples > 0) here? */
if (keep_tuples == 0)
continue;
if (chain_tuple_moved) /* some chains was moved
* while */
{ /* cleaning this page */
if (chain_tuple_moved) /* some chains was moved while */
{ /* cleaning this page */
Assert(vacpage->offsets_free > 0);
for (i = 0; i < vacpage->offsets_free; i++)
{
if (vacpage->offsets[i] == offnum)
break;
}
if (i >= vacpage->offsets_free) /* not found */
if (i >= vacpage->offsets_free) /* not found */
{
vacpage->offsets[vacpage->offsets_free++] = offnum;
keep_tuples--;
@@ -2128,18 +2124,19 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
off <= maxoff;
off = OffsetNumberNext(off))
{
ItemId itemid = PageGetItemId(page, off);
HeapTupleHeader htup;
ItemId itemid = PageGetItemId(page, off);
HeapTupleHeader htup;

if (!ItemIdIsUsed(itemid))
continue;
htup = (HeapTupleHeader) PageGetItem(page, itemid);
if (htup->t_infomask & HEAP_XMIN_COMMITTED)
continue;

/*
** See comments in the walk-along-page loop above, why we
** have Asserts here instead of if (...) elog(ERROR).
*/
* * See comments in the walk-along-page loop above, why
* we * have Asserts here instead of if (...) elog(ERROR).
*/
Assert(!(htup->t_infomask & HEAP_MOVED_IN));
Assert(htup->t_infomask & HEAP_MOVED_OFF);
Assert(HeapTupleHeaderGetXvac(htup) == myXID);
@@ -2152,7 +2149,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (vacpage->offsets[i] == off)
break;
}
if (i >= vacpage->offsets_free) /* not found */
if (i >= vacpage->offsets_free) /* not found */
{
vacpage->offsets[vacpage->offsets_free++] = off;
Assert(keep_tuples > 0);
@@ -2247,7 +2244,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
*/
update_hint_bits(onerel, fraged_pages, num_fraged_pages,
last_move_dest_block, num_moved);


/*
* It'd be cleaner to make this report at the bottom of this routine,
* but then the rusage would double-count the second pass of index
@@ -2255,11 +2252,11 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* processing that occurs below.
*/
ereport(elevel,
(errmsg("\"%s\": moved %u row versions, truncated %u to %u pages",
RelationGetRelationName(onerel),
num_moved, nblocks, blkno),
errdetail("%s",
vac_show_rusage(&ru0))));
(errmsg("\"%s\": moved %u row versions, truncated %u to %u pages",
RelationGetRelationName(onerel),
num_moved, nblocks, blkno),
errdetail("%s",
vac_show_rusage(&ru0))));

/*
* Reflect the motion of system tuples to catalog cache here.
@@ -2284,6 +2281,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
*vpleft = *vpright;
*vpright = vpsave;
}

/*
* keep_tuples is the number of tuples that have been moved
* off a page during chain moves but not been scanned over
@@ -2301,13 +2299,13 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (vacpage->blkno == (blkno - 1) &&
vacpage->offsets_free > 0)
{
Buffer buf;
Page page;
OffsetNumber unused[BLCKSZ / sizeof(OffsetNumber)];
OffsetNumber offnum,
maxoff;
int uncnt;
int num_tuples = 0;
Buffer buf;
Page page;
OffsetNumber unused[BLCKSZ / sizeof(OffsetNumber)];
OffsetNumber offnum,
maxoff;
int uncnt;
int num_tuples = 0;

buf = ReadBuffer(onerel, vacpage->blkno);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
@@ -2317,7 +2315,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
offnum <= maxoff;
offnum = OffsetNumberNext(offnum))
{
ItemId itemid = PageGetItemId(page, offnum);
ItemId itemid = PageGetItemId(page, offnum);
HeapTupleHeader htup;

if (!ItemIdIsUsed(itemid))
@@ -2327,9 +2325,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
continue;

/*
** See comments in the walk-along-page loop above, why we
** have Asserts here instead of if (...) elog(ERROR).
*/
* * See comments in the walk-along-page loop above, why
* we * have Asserts here instead of if (...) elog(ERROR).
*/
Assert(!(htup->t_infomask & HEAP_MOVED_IN));
Assert(htup->t_infomask & HEAP_MOVED_OFF);
Assert(HeapTupleHeaderGetXvac(htup) == myXID);
@@ -2418,10 +2416,10 @@ move_chain_tuple(Relation rel,
ExecContext ec, ItemPointer ctid, bool cleanVpd)
{
TransactionId myXID = GetCurrentTransactionId();
HeapTupleData newtup;
OffsetNumber newoff;
ItemId newitemid;
Size tuple_len = old_tup->t_len;
HeapTupleData newtup;
OffsetNumber newoff;
ItemId newitemid;
Size tuple_len = old_tup->t_len;

heap_copytuple_with_tuple(old_tup, &newtup);

@@ -2434,36 +2432,32 @@ move_chain_tuple(Relation rel,
START_CRIT_SECTION();

old_tup->t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
HEAP_XMIN_INVALID |
HEAP_MOVED_IN);
HEAP_XMIN_INVALID |
HEAP_MOVED_IN);
old_tup->t_data->t_infomask |= HEAP_MOVED_OFF;
HeapTupleHeaderSetXvac(old_tup->t_data, myXID);

/*
* If this page was not used before - clean it.
*
* NOTE: a nasty bug used to lurk here. It is possible
* for the source and destination pages to be the same
* (since this tuple-chain member can be on a page
* lower than the one we're currently processing in
* the outer loop). If that's true, then after
* vacuum_page() the source tuple will have been
* moved, and tuple.t_data will be pointing at
* garbage. Therefore we must do everything that uses
* NOTE: a nasty bug used to lurk here. It is possible for the source
* and destination pages to be the same (since this tuple-chain member
* can be on a page lower than the one we're currently processing in
* the outer loop). If that's true, then after vacuum_page() the
* source tuple will have been moved, and tuple.t_data will be
* pointing at garbage. Therefore we must do everything that uses
* old_tup->t_data BEFORE this step!!
*
* This path is different from the other callers of
* vacuum_page, because we have already incremented
* the vacpage's offsets_used field to account for the
* tuple(s) we expect to move onto the page. Therefore
* vacuum_page's check for offsets_used == 0 is wrong.
* But since that's a good debugging check for all
* other callers, we work around it here rather than
* remove it.
* This path is different from the other callers of vacuum_page, because
* we have already incremented the vacpage's offsets_used field to
* account for the tuple(s) we expect to move onto the page. Therefore
* vacuum_page's check for offsets_used == 0 is wrong. But since
* that's a good debugging check for all other callers, we work around
* it here rather than remove it.
*/
if (!PageIsEmpty(dst_page) && cleanVpd)
{
int sv_offsets_used = dst_vacpage->offsets_used;
int sv_offsets_used = dst_vacpage->offsets_used;

dst_vacpage->offsets_used = 0;
vacuum_page(rel, dst_buf, dst_vacpage);
@@ -2471,8 +2465,8 @@ move_chain_tuple(Relation rel,
}

/*
* Update the state of the copied tuple, and store it
* on the destination page.
* Update the state of the copied tuple, and store it on the
* destination page.
*/
newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
HEAP_XMIN_INVALID |
@@ -2484,7 +2478,7 @@ move_chain_tuple(Relation rel,
if (newoff == InvalidOffsetNumber)
{
elog(PANIC, "failed to add item with len = %lu to page %u while moving tuple chain",
(unsigned long) tuple_len, dst_vacpage->blkno);
(unsigned long) tuple_len, dst_vacpage->blkno);
}
newitemid = PageGetItemId(dst_page, newoff);
pfree(newtup.t_data);
@@ -2509,8 +2503,7 @@ move_chain_tuple(Relation rel,
else
{
/*
* No XLOG record, but still need to flag that XID
* exists on disk
* No XLOG record, but still need to flag that XID exists on disk
*/
MyXactMadeTempRelUpdate = true;
}
@@ -2518,9 +2511,8 @@ move_chain_tuple(Relation rel,
END_CRIT_SECTION();

/*
* Set new tuple's t_ctid pointing to itself for last
* tuple in chain, and to next tuple in chain
* otherwise.
* Set new tuple's t_ctid pointing to itself for last tuple in chain,
* and to next tuple in chain otherwise.
*/
/* Is this ok after log_heap_move() and END_CRIT_SECTION()? */
if (!ItemPointerIsValid(ctid))
@@ -2559,10 +2551,10 @@ move_plain_tuple(Relation rel,
ExecContext ec)
{
TransactionId myXID = GetCurrentTransactionId();
HeapTupleData newtup;
OffsetNumber newoff;
ItemId newitemid;
Size tuple_len = old_tup->t_len;
HeapTupleData newtup;
OffsetNumber newoff;
ItemId newitemid;
Size tuple_len = old_tup->t_len;

/* copy tuple */
heap_copytuple_with_tuple(old_tup, &newtup);
@@ -2570,9 +2562,9 @@ move_plain_tuple(Relation rel,
/*
* register invalidation of source tuple in catcaches.
*
* (Note: we do not need to register the copied tuple, because we
* are not changing the tuple contents and so there cannot be
* any need to flush negative catcache entries.)
* (Note: we do not need to register the copied tuple, because we are not
* changing the tuple contents and so there cannot be any need to
* flush negative catcache entries.)
*/
CacheInvalidateHeapTuple(rel, old_tup);

@@ -2609,8 +2601,8 @@ move_plain_tuple(Relation rel,
* Mark old tuple as MOVED_OFF by me.
*/
old_tup->t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
HEAP_XMIN_INVALID |
HEAP_MOVED_IN);
HEAP_XMIN_INVALID |
HEAP_MOVED_IN);
old_tup->t_data->t_infomask |= HEAP_MOVED_OFF;
HeapTupleHeaderSetXvac(old_tup->t_data, myXID);

@@ -2628,8 +2620,7 @@ move_plain_tuple(Relation rel,
else
{
/*
* No XLOG record, but still need to flag that XID exists
* on disk
* No XLOG record, but still need to flag that XID exists on disk
*/
MyXactMadeTempRelUpdate = true;
}
@@ -2637,7 +2628,7 @@ move_plain_tuple(Relation rel,
END_CRIT_SECTION();

dst_vacpage->free = ((PageHeader) dst_page)->pd_upper -
((PageHeader) dst_page)->pd_lower;
((PageHeader) dst_page)->pd_lower;
LockBuffer(dst_buf, BUFFER_LOCK_UNLOCK);
LockBuffer(old_buf, BUFFER_LOCK_UNLOCK);

@@ -2670,17 +2661,17 @@ update_hint_bits(Relation rel, VacPageList fraged_pages, int num_fraged_pages,
{
int checked_moved = 0;
int i;
VacPage *curpage;
VacPage *curpage;

for (i = 0, curpage = fraged_pages->pagedesc;
i < num_fraged_pages;
i++, curpage++)
{
Buffer buf;
Page page;
OffsetNumber max_offset;
OffsetNumber off;
int num_tuples = 0;
Buffer buf;
Page page;
OffsetNumber max_offset;
OffsetNumber off;
int num_tuples = 0;

vacuum_delay_point();

@@ -2696,17 +2687,18 @@ update_hint_bits(Relation rel, VacPageList fraged_pages, int num_fraged_pages,
off <= max_offset;
off = OffsetNumberNext(off))
{
ItemId itemid = PageGetItemId(page, off);
HeapTupleHeader htup;
ItemId itemid = PageGetItemId(page, off);
HeapTupleHeader htup;

if (!ItemIdIsUsed(itemid))
continue;
htup = (HeapTupleHeader) PageGetItem(page, itemid);
if (htup->t_infomask & HEAP_XMIN_COMMITTED)
continue;

/*
* See comments in the walk-along-page loop above, why we
* have Asserts here instead of if (...) elog(ERROR). The
* See comments in the walk-along-page loop above, why we have
* Asserts here instead of if (...) elog(ERROR). The
* difference here is that we may see MOVED_IN.
*/
Assert(htup->t_infomask & HEAP_MOVED);
@@ -2865,14 +2857,14 @@ scan_index(Relation indrel, double num_tuples)
false);

ereport(elevel,
(errmsg("index \"%s\" now contains %.0f row versions in %u pages",
RelationGetRelationName(indrel),
stats->num_index_tuples,
stats->num_pages),
errdetail("%u index pages have been deleted, %u are currently reusable.\n"
"%s",
stats->pages_deleted, stats->pages_free,
vac_show_rusage(&ru0))));
(errmsg("index \"%s\" now contains %.0f row versions in %u pages",
RelationGetRelationName(indrel),
stats->num_index_tuples,
stats->num_pages),
errdetail("%u index pages have been deleted, %u are currently reusable.\n"
"%s",
stats->pages_deleted, stats->pages_free,
vac_show_rusage(&ru0))));

/*
* Check for tuple count mismatch. If the index is partial, then it's
@@ -2932,16 +2924,16 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
false);

ereport(elevel,
(errmsg("index \"%s\" now contains %.0f row versions in %u pages",
RelationGetRelationName(indrel),
stats->num_index_tuples,
stats->num_pages),
errdetail("%.0f index row versions were removed.\n"
(errmsg("index \"%s\" now contains %.0f row versions in %u pages",
RelationGetRelationName(indrel),
stats->num_index_tuples,
stats->num_pages),
errdetail("%.0f index row versions were removed.\n"
"%u index pages have been deleted, %u are currently reusable.\n"
"%s",
stats->tuples_removed,
stats->pages_deleted, stats->pages_free,
vac_show_rusage(&ru0))));
"%s",
stats->tuples_removed,
stats->pages_deleted, stats->pages_free,
vac_show_rusage(&ru0))));

/*
* Check for tuple count mismatch. If the index is partial, then it's
@@ -3370,7 +3362,7 @@ vacuum_delay_point(void)
if (VacuumCostActive && !InterruptPending &&
VacuumCostBalance >= VacuumCostLimit)
{
int msec;
int msec;

msec = VacuumCostDelay * VacuumCostBalance / VacuumCostLimit;
if (msec > VacuumCostDelay * 4)

@@ -31,7 +31,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.44 2004/08/29 04:12:30 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.45 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -594,14 +594,14 @@ lazy_scan_index(Relation indrel, LVRelStats *vacrelstats)
false);

ereport(elevel,
(errmsg("index \"%s\" now contains %.0f row versions in %u pages",
RelationGetRelationName(indrel),
stats->num_index_tuples,
stats->num_pages),
errdetail("%u index pages have been deleted, %u are currently reusable.\n"
"%s",
stats->pages_deleted, stats->pages_free,
vac_show_rusage(&ru0))));
(errmsg("index \"%s\" now contains %.0f row versions in %u pages",
RelationGetRelationName(indrel),
stats->num_index_tuples,
stats->num_pages),
errdetail("%u index pages have been deleted, %u are currently reusable.\n"
"%s",
stats->pages_deleted, stats->pages_free,
vac_show_rusage(&ru0))));

pfree(stats);
}
@@ -654,16 +654,16 @@ lazy_vacuum_index(Relation indrel, LVRelStats *vacrelstats)
false);

ereport(elevel,
(errmsg("index \"%s\" now contains %.0f row versions in %u pages",
RelationGetRelationName(indrel),
stats->num_index_tuples,
stats->num_pages),
errdetail("%.0f index row versions were removed.\n"
(errmsg("index \"%s\" now contains %.0f row versions in %u pages",
RelationGetRelationName(indrel),
stats->num_index_tuples,
stats->num_pages),
errdetail("%.0f index row versions were removed.\n"
"%u index pages have been deleted, %u are currently reusable.\n"
"%s",
stats->tuples_removed,
stats->pages_deleted, stats->pages_free,
vac_show_rusage(&ru0))));
"%s",
stats->tuples_removed,
stats->pages_deleted, stats->pages_free,
vac_show_rusage(&ru0))));

pfree(stats);
}

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.100 2004/08/29 04:12:30 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.101 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -62,7 +62,7 @@ assign_datestyle(const char *value, bool doit, GucSource source)
if (source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid list syntax for parameter \"datestyle\"")));
errmsg("invalid list syntax for parameter \"datestyle\"")));
return NULL;
}

@@ -148,8 +148,8 @@ assign_datestyle(const char *value, bool doit, GucSource source)
if (source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("unrecognized \"datestyle\" key word: \"%s\"",
tok)));
errmsg("unrecognized \"datestyle\" key word: \"%s\"",
tok)));
ok = false;
break;
}
@@ -314,9 +314,10 @@ assign_timezone(const char *value, bool doit, GucSource source)
*
* During GUC initialization, since the timezone library isn't
* set up yet, pg_get_current_timezone will return NULL and we
* will leave the setting as UNKNOWN. If this isn't overridden
* from the config file then pg_timezone_initialize() will
* eventually select a default value from the environment.
* will leave the setting as UNKNOWN. If this isn't
* overridden from the config file then
* pg_timezone_initialize() will eventually select a default
* value from the environment.
*/
const char *curzone = pg_get_current_timezone();

@@ -329,13 +330,14 @@ assign_timezone(const char *value, bool doit, GucSource source)
* Otherwise assume it is a timezone name.
*
* We have to actually apply the change before we can have any
* hope of checking it. So, save the old value in case we have
* to back out. We have to copy since pg_get_current_timezone
* returns a pointer to its static state.
* hope of checking it. So, save the old value in case we
* have to back out. We have to copy since
* pg_get_current_timezone returns a pointer to its static
* state.
*
* This would all get a lot simpler if the TZ library had a better
* API that would let us look up and test a timezone name without
* making it the default.
* This would all get a lot simpler if the TZ library had a
* better API that would let us look up and test a timezone
* name without making it the default.
*/
const char *cur_tz;
char *save_tz;
@@ -368,22 +370,23 @@ assign_timezone(const char *value, bool doit, GucSource source)
|
||||
else
|
||||
{
|
||||
/*
|
||||
* TZ library wasn't initialized yet. Annoyingly, we will
|
||||
* come here during startup because guc-file.l checks
|
||||
* the value with doit = false before actually applying.
|
||||
* The best approach seems to be as follows:
|
||||
* TZ library wasn't initialized yet. Annoyingly, we
|
||||
* will come here during startup because guc-file.l
|
||||
* checks the value with doit = false before actually
|
||||
* applying. The best approach seems to be as follows:
|
||||
*
|
||||
* 1. known && acceptable: leave the setting in place,
|
||||
* since we'll apply it soon anyway. This is mainly
|
||||
* so that any log messages printed during this interval
|
||||
* are timestamped with the user's requested timezone.
|
||||
* so that any log messages printed during this
|
||||
* interval are timestamped with the user's requested
|
||||
* timezone.
|
||||
*
|
||||
* 2. known && !acceptable: revert to GMT for lack of
|
||||
* any better idea. (select_default_timezone() may get
|
||||
* 2. known && !acceptable: revert to GMT for lack of any
|
||||
* better idea. (select_default_timezone() may get
|
||||
* called later to undo this.)
|
||||
*
|
||||
* 3. !known: no need to do anything since TZ library
|
||||
* did not change its state.
|
||||
* 3. !known: no need to do anything since TZ library did
|
||||
* not change its state.
|
||||
*
|
||||
* Again, this should all go away sometime soon.
|
||||
*/
|
||||
@@ -441,7 +444,7 @@ assign_timezone(const char *value, bool doit, GucSource source)
|
||||
const char *
|
||||
show_timezone(void)
|
||||
{
|
||||
const char *tzn;
|
||||
const char *tzn;
|
||||
|
||||
if (HasCTZSet)
|
||||
{
|
||||
@@ -472,14 +475,14 @@ assign_XactIsoLevel(const char *value, bool doit, GucSource source)
|
||||
{
|
||||
if (doit && source >= PGC_S_INTERACTIVE)
|
||||
{
|
||||
if (SerializableSnapshot != NULL)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
|
||||
errmsg("SET TRANSACTION ISOLATION LEVEL must be called before any query")));
|
||||
if (IsSubTransaction())
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
|
||||
errmsg("SET TRANSACTION ISOLATION LEVEL must not be called in a subtransaction")));
|
||||
if (SerializableSnapshot != NULL)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
|
||||
errmsg("SET TRANSACTION ISOLATION LEVEL must be called before any query")));
|
||||
if (IsSubTransaction())
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
|
||||
errmsg("SET TRANSACTION ISOLATION LEVEL must not be called in a subtransaction")));
|
||||
}
|
||||
|
||||
if (strcmp(value, "serializable") == 0)
|
||||
@@ -596,7 +599,7 @@ assign_client_encoding(const char *value, bool doit, GucSource source)
|
||||
* limit on names, so we can tell whether we're being passed an initial
|
||||
* username or a saved/restored value.
|
||||
*/
|
||||
extern char *session_authorization_string; /* in guc.c */
|
||||
extern char *session_authorization_string; /* in guc.c */
|
||||
|
||||
const char *
|
||||
assign_session_authorization(const char *value, bool doit, GucSource source)
|
||||
|
||||
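The three-way startup fallback described in the rewrapped assign_timezone comment reduces to a small decision function. A minimal standalone sketch: the enum, function, and flag names below are invented for illustration and are not the PostgreSQL API.

#include <stdbool.h>

typedef enum { TZ_KEEP, TZ_REVERT_TO_GMT, TZ_NO_CHANGE } TzStartupAction;

/* Decide what to do with a timezone setting seen before the TZ
 * library is initialized.  "known" means the zone name was
 * recognized; "acceptable" means it also passed validation. */
static TzStartupAction
startup_timezone_action(bool known, bool acceptable)
{
	if (known && acceptable)
		return TZ_KEEP;				/* case 1: we'll apply it soon anyway */
	if (known)
		return TZ_REVERT_TO_GMT;	/* case 2: GMT until a better default */
	return TZ_NO_CHANGE;			/* case 3: TZ library state unchanged */
}
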
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.84 2004/08/29 04:12:30 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.85 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -191,8 +191,8 @@ checkViewTupleDesc(TupleDesc newdesc, TupleDesc olddesc)
newattr->atttypmod != oldattr->atttypmod)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
errmsg("cannot change data type of view column \"%s\"",
NameStr(oldattr->attname))));
errmsg("cannot change data type of view column \"%s\"",
NameStr(oldattr->attname))));
/* We can ignore the remaining attributes of an attribute... */
}


@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.80 2004/08/29 04:12:31 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.81 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,7 +51,7 @@
* needs access to variables of the current outer tuple. (The handling of
* this parameter is currently pretty inconsistent: some callers pass NULL
* and some pass down their parent's value; so don't rely on it in other
* situations. It'd probably be better to remove the whole thing and use
* situations. It'd probably be better to remove the whole thing and use
* the generalized parameter mechanism instead.)
*/
void
@@ -64,7 +64,7 @@ ExecReScan(PlanState *node, ExprContext *exprCtxt)
/* If we have changed parameters, propagate that info */
if (node->chgParam != NULL)
{
ListCell *l;
ListCell *l;

foreach(l, node->initPlan)
{
@@ -365,19 +365,19 @@ ExecMayReturnRawTuples(PlanState *node)
{
/*
* At a table scan node, we check whether ExecAssignScanProjectionInfo
* decided to do projection or not. Most non-scan nodes always project
* and so we can return "false" immediately. For nodes that don't
* project but just pass up input tuples, we have to recursively
* decided to do projection or not. Most non-scan nodes always
* project and so we can return "false" immediately. For nodes that
* don't project but just pass up input tuples, we have to recursively
* examine the input plan node.
*
* Note: Hash and Material are listed here because they sometimes
* return an original input tuple, not a copy. But Sort and SetOp
* never return an original tuple, so they can be treated like
* projecting nodes.
* Note: Hash and Material are listed here because they sometimes return
* an original input tuple, not a copy. But Sort and SetOp never
* return an original tuple, so they can be treated like projecting
* nodes.
*/
switch (nodeTag(node))
{
/* Table scan nodes */
/* Table scan nodes */
case T_SeqScanState:
case T_IndexScanState:
case T_TidScanState:
@@ -387,7 +387,7 @@ ExecMayReturnRawTuples(PlanState *node)
return true;
break;

/* Non-projecting nodes */
/* Non-projecting nodes */
case T_HashState:
case T_MaterialState:
case T_UniqueState:
@@ -395,19 +395,19 @@ ExecMayReturnRawTuples(PlanState *node)
return ExecMayReturnRawTuples(node->lefttree);

case T_AppendState:
{
AppendState *appendstate = (AppendState *) node;
int j;

for (j = 0; j < appendstate->as_nplans; j++)
{
if (ExecMayReturnRawTuples(appendstate->appendplans[j]))
return true;
}
break;
}
AppendState *appendstate = (AppendState *) node;
int j;

/* All projecting node types come here */
for (j = 0; j < appendstate->as_nplans; j++)
{
if (ExecMayReturnRawTuples(appendstate->appendplans[j]))
return true;
}
break;
}

/* All projecting node types come here */
default:
break;
}

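The shape of ExecMayReturnRawTuples in the hunk above is: scan nodes answer from their projection decision, pass-through nodes (Hash, Material, Unique) recurse into their input, and Append must check every child. A self-contained sketch with simplified stand-in types — none of these are the real PlanState declarations:

#include <stdbool.h>

typedef enum { SCAN_NODE, PASSTHROUGH_NODE, APPEND_NODE, OTHER_NODE } Kind;

typedef struct PlanNode
{
	Kind	kind;
	bool	does_projection;	/* meaningful for scan nodes */
	struct PlanNode *input;		/* for pass-through nodes */
	struct PlanNode **children;	/* for append nodes */
	int		nchildren;
} PlanNode;

static bool
may_return_raw_tuples(PlanNode *node)
{
	int		i;

	switch (node->kind)
	{
		case SCAN_NODE:			/* raw only if no projection was set up */
			return !node->does_projection;
		case PASSTHROUGH_NODE:	/* e.g. Hash, Material: ask the input */
			return may_return_raw_tuples(node->input);
		case APPEND_NODE:		/* raw if any child can be raw */
			for (i = 0; i < node->nchildren; i++)
				if (may_return_raw_tuples(node->children[i]))
					return true;
			return false;
		default:				/* all projecting node types */
			return false;
	}
}
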
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.10 2004/08/29 04:12:31 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.11 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -26,8 +26,8 @@
static TupleHashTable CurTupleHashTable = NULL;

static uint32 TupleHashTableHash(const void *key, Size keysize);
static int TupleHashTableMatch(const void *key1, const void *key2,
Size keysize);
static int TupleHashTableMatch(const void *key1, const void *key2,
Size keysize);


/*****************************************************************************
@@ -303,7 +303,7 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx,
Assert(entrysize >= sizeof(TupleHashEntryData));

hashtable = (TupleHashTable) MemoryContextAlloc(tablecxt,
sizeof(TupleHashTableData));
sizeof(TupleHashTableData));

hashtable->numCols = numCols;
hashtable->keyColIdx = keyColIdx;
@@ -321,7 +321,7 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx,
hash_ctl.hcxt = tablecxt;
hashtable->hashtab = hash_create("TupleHashTable", (long) nbuckets,
&hash_ctl,
HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
if (hashtable->hashtab == NULL)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
@@ -359,8 +359,8 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
/*
* Set up data needed by hash and match functions
*
* We save and restore CurTupleHashTable just in case someone manages
* to invoke this code re-entrantly.
* We save and restore CurTupleHashTable just in case someone manages to
* invoke this code re-entrantly.
*/
hashtable->tupdesc = tupdesc;
saveCurHT = CurTupleHashTable;
@@ -389,8 +389,8 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,

/*
* Zero any caller-requested space in the entry. (This zaps
* the "key data" dynahash.c copied into the new entry, but
* we don't care since we're about to overwrite it anyway.)
* the "key data" dynahash.c copied into the new entry, but we
* don't care since we're about to overwrite it anyway.)
*/
MemSet(entry, 0, hashtable->entrysize);

@@ -414,13 +414,13 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
*
* The passed-in key is a pointer to a HeapTuple pointer -- this is either
* the firstTuple field of a TupleHashEntry struct, or the key value passed
* to hash_search. We ignore the keysize.
* to hash_search. We ignore the keysize.
*
* CurTupleHashTable must be set before calling this, since dynahash.c
* doesn't provide any API that would let us get at the hashtable otherwise.
*
* Also, the caller must select an appropriate memory context for running
* the hash functions. (dynahash.c doesn't change CurrentMemoryContext.)
* the hash functions. (dynahash.c doesn't change CurrentMemoryContext.)
*/
static uint32
TupleHashTableHash(const void *key, Size keysize)

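The save/restore of CurTupleHashTable exists because dynahash-style callbacks take only (key, keysize) and cannot carry a context argument; a file-static pointer fills that role and must be saved and restored so a re-entrant call cannot clobber it. A simplified illustration — the names and the trivial hash below are invented, not the execGrouping.c code:

#include <stddef.h>

static const void *CurLookupContext = NULL;	/* seen by the callbacks */

static unsigned int
toy_hash(const void *key, size_t keysize)
{
	/* a real callback would consult CurLookupContext here */
	const unsigned char *p = key;
	unsigned int h = 0;
	size_t	i;

	for (i = 0; i < keysize; i++)
		h = h * 31 + p[i];
	return h;
}

unsigned int
lookup_with_context(const void *context, const void *key, size_t keysize)
{
	const void *save = CurLookupContext;	/* save: we might be re-entered */
	unsigned int h;

	CurLookupContext = context;
	h = toy_hash(key, keysize);				/* stands in for hash_search() */
	CurLookupContext = save;				/* restore prior value */
	return h;
}
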
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.42 2004/08/29 04:12:31 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.43 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -274,9 +274,9 @@ ExecRemoveJunk(JunkFilter *junkfilter, TupleTableSlot *slot)
* dealing with a small number of attributes. for large tuples we just
* use palloc.
*
* Note: we could use just one set of arrays if we were willing to
* assume that the resno mapping is monotonic... I think it is, but
* won't take the risk of breaking things right now.
* Note: we could use just one set of arrays if we were willing to assume
* that the resno mapping is monotonic... I think it is, but won't
* take the risk of breaking things right now.
*/
if (cleanLength > 64)
{
@@ -309,7 +309,7 @@ ExecRemoveJunk(JunkFilter *junkfilter, TupleTableSlot *slot)
*/
for (i = 0; i < cleanLength; i++)
{
int j = cleanMap[i] - 1;
int j = cleanMap[i] - 1;

values[i] = old_values[j];
nulls[i] = old_nulls[j];

@@ -26,7 +26,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.235 2004/08/29 04:12:31 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.236 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -521,8 +521,8 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
* Multiple result relations (due to inheritance)
* parseTree->resultRelations identifies them all
*/
ResultRelInfo *resultRelInfo;
ListCell *l;
ResultRelInfo *resultRelInfo;
ListCell *l;

numResultRelations = list_length(resultRelations);
resultRelInfos = (ResultRelInfo *)
@@ -644,10 +644,10 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
/*
* Initialize the junk filter if needed. SELECT and INSERT queries
* need a filter if there are any junk attrs in the tlist. INSERT and
* SELECT INTO also need a filter if the plan may return raw disk tuples
* (else heap_insert will be scribbling on the source relation!).
* UPDATE and DELETE always need a filter, since there's always a junk
* 'ctid' attribute present --- no need to look first.
* SELECT INTO also need a filter if the plan may return raw disk
* tuples (else heap_insert will be scribbling on the source
* relation!). UPDATE and DELETE always need a filter, since there's
* always a junk 'ctid' attribute present --- no need to look first.
*/
{
bool junk_filter_needed = false;
@@ -1460,7 +1460,7 @@ ldelete:;
&ctid,
estate->es_snapshot->curcid,
estate->es_crosscheck_snapshot,
true /* wait for commit */);
true /* wait for commit */ );
switch (result)
{
case HeapTupleSelfUpdated:
@@ -1596,7 +1596,7 @@ lreplace:;
&ctid,
estate->es_snapshot->curcid,
estate->es_crosscheck_snapshot,
true /* wait for commit */);
true /* wait for commit */ );
switch (result)
{
case HeapTupleSelfUpdated:

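The junk-filter rule the rewrapped InitPlan comment states boils down to: UPDATE and DELETE always filter; otherwise filter when the target list carries junk attributes, or when an INSERT/SELECT INTO plan might hand back raw disk tuples. A hypothetical condensation — the enum values and helper below are stand-ins, not the real executor types:

#include <stdbool.h>

typedef enum { OP_SELECT, OP_INSERT, OP_UPDATE, OP_DELETE } Op;

static bool
junk_filter_needed(Op op, bool tlist_has_junk, bool may_return_raw)
{
	if (op == OP_UPDATE || op == OP_DELETE)
		return true;			/* always: a junk 'ctid' attr is present */
	if (tlist_has_junk)
		return true;			/* junk attrs must be stripped */
	if (op == OP_INSERT && may_return_raw)
		return true;			/* don't let heap_insert scribble on the
								 * source relation */
	return false;
}
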
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.167 2004/08/29 04:12:31 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.168 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,51 +59,51 @@ static Datum ExecEvalArrayRef(ArrayRefExprState *astate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalAggref(AggrefExprState *aggref,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalConst(ExprState *exprstate, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalParam(ExprState *exprstate, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
bool *isNull, ExprDoneCond *isDone);
static ExprDoneCond ExecEvalFuncArgs(FunctionCallInfo fcinfo,
List *argList, ExprContext *econtext);
static Datum ExecMakeFunctionResultNoSets(FuncExprState *fcache,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalFunc(FuncExprState *fcache, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalOper(FuncExprState *fcache, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalDistinct(FuncExprState *fcache, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalNot(BoolExprState *notclause, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalOr(BoolExprState *orExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCaseTestExpr(ExprState *exprstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalArray(ArrayExprState *astate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalRow(RowExprState *rstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCoalesce(CoalesceExprState *coalesceExpr,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalNullIf(FuncExprState *nullIfExpr,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalNullTest(GenericExprState *nstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
@@ -114,14 +114,14 @@ static Datum ExecEvalCoerceToDomain(CoerceToDomainState *cstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCoerceToDomainValue(ExprState *exprstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalFieldSelect(FieldSelectState *fstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalFieldStore(FieldStoreState *fstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalRelabelType(GenericExprState *exprstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
@@ -145,7 +145,7 @@ static Datum ExecEvalRelabelType(GenericExprState *exprstate,
*
* Note: for notational simplicity we declare these functions as taking the
* specific type of ExprState that they work on. This requires casting when
* assigning the function pointer in ExecInitExpr. Be careful that the
* assigning the function pointer in ExecInitExpr. Be careful that the
* function signature is declared correctly, because the cast suppresses
* automatic checking!
*
@@ -236,13 +236,13 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
isDone));

/*
* If refexpr yields NULL, and it's a fetch, then result is NULL.
* In the assignment case, we'll cons up something below.
* If refexpr yields NULL, and it's a fetch, then result is NULL. In
* the assignment case, we'll cons up something below.
*/
if (*isNull)
{
if (isDone && *isDone == ExprEndResult)
return (Datum) NULL; /* end of set result */
return (Datum) NULL; /* end of set result */
if (!isAssignment)
return (Datum) NULL;
}
@@ -321,10 +321,11 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
*
* XXX At some point we'll need to look into making the old value of
* the array element available via CaseTestExpr, as is done by
* ExecEvalFieldStore. This is not needed now but will be needed
* to support arrays of composite types; in an assignment to a field
* of an array member, the parser would generate a FieldStore that
* expects to fetch its input tuple via CaseTestExpr.
* ExecEvalFieldStore. This is not needed now but will be needed
* to support arrays of composite types; in an assignment to a
* field of an array member, the parser would generate a
* FieldStore that expects to fetch its input tuple via
* CaseTestExpr.
*/
sourceData = ExecEvalExpr(astate->refassgnexpr,
econtext,
@@ -339,15 +340,16 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
return PointerGetDatum(array_source);

/*
* For an assignment, if all the subscripts and the input expression
* are non-null but the original array is null, then substitute an
* empty (zero-dimensional) array and proceed with the assignment.
* This only works for varlena arrays, though; for fixed-length
* array types we punt and return the null input array.
* For an assignment, if all the subscripts and the input
* expression are non-null but the original array is null, then
* substitute an empty (zero-dimensional) array and proceed with
* the assignment. This only works for varlena arrays, though; for
* fixed-length array types we punt and return the null input
* array.
*/
if (*isNull)
{
if (astate->refattrlength > 0) /* fixed-length array? */
if (astate->refattrlength > 0) /* fixed-length array? */
return PointerGetDatum(array_source);

array_source = construct_md_array(NULL, 0, NULL, NULL,
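The substitution rule in the rewrapped comment — a null input array plus non-null subscripts and source yields an empty array to assign into, but only for varlena array types — can be shown in miniature. Everything below is a stand-in; construct_empty_array() plays the part of construct_md_array(NULL, 0, ...):

#include <stdbool.h>
#include <stddef.h>

typedef struct ArrayStub { int ndims; } ArrayStub;

static ArrayStub *
construct_empty_array(void)
{
	static ArrayStub empty = {0};	/* zero-dimensional */
	return &empty;
}

/* attrlength > 0 marks a fixed-length array type; <= 0 is varlena */
static ArrayStub *
array_for_assignment(ArrayStub *input, bool input_is_null, int attrlength)
{
	if (!input_is_null)
		return input;				/* normal case */
	if (attrlength > 0)
		return NULL;				/* fixed-length: punt, keep the null */
	return construct_empty_array();	/* varlena: assign into empty array */
}
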
@@ -444,10 +446,10 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
/*
* Get the slot and attribute number we want
*
* The asserts check that references to system attributes only appear
* at the level of a relation scan; at higher levels, system attributes
* must be treated as ordinary variables (since we no longer have access
* to the original tuple).
* The asserts check that references to system attributes only appear at
* the level of a relation scan; at higher levels, system attributes
* must be treated as ordinary variables (since we no longer have
* access to the original tuple).
*/
attnum = variable->varattno;

@@ -476,8 +478,8 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
tuple_type = slot->ttc_tupleDescriptor;

/*
* Some checks that are only applied for user attribute numbers
* (bogus system attnums will be caught inside heap_getattr).
* Some checks that are only applied for user attribute numbers (bogus
* system attnums will be caught inside heap_getattr).
*/
if (attnum > 0)
{
@@ -488,9 +490,10 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
tuple_type->attrs[attnum - 1] != NULL);

/*
* If the attribute's column has been dropped, we force a NULL result.
* This case should not happen in normal use, but it could happen if
* we are executing a plan cached before the column was dropped.
* If the attribute's column has been dropped, we force a NULL
* result. This case should not happen in normal use, but it could
* happen if we are executing a plan cached before the column was
* dropped.
*/
if (tuple_type->attrs[attnum - 1]->attisdropped)
{
@@ -499,13 +502,14 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
}

/*
* This assert checks that the datatype the plan expects to get (as
* told by our "variable" argument) is in fact the datatype of the
* attribute being fetched (as seen in the current context, identified
* by our "econtext" argument). Otherwise crashes are likely.
* This assert checks that the datatype the plan expects to get
* (as told by our "variable" argument) is in fact the datatype of
* the attribute being fetched (as seen in the current context,
* identified by our "econtext" argument). Otherwise crashes are
* likely.
*
* Note that we can't check dropped columns, since their atttypid
* has been zeroed.
* Note that we can't check dropped columns, since their atttypid has
* been zeroed.
*/
Assert(variable->vartype == tuple_type->attrs[attnum - 1]->atttypid);
}
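The dropped-column guard above trades an error for a NULL, so a plan cached before an ALTER TABLE ... DROP COLUMN keeps running. In outline — the struct is a simplified stand-in, not Form_pg_attribute:

#include <stdbool.h>

typedef struct { bool attisdropped; unsigned int atttypid; } AttStub;

/* Returns true and sets *isnull when the column is dropped, so the
 * caller can skip the fetch entirely. */
static bool
force_null_if_dropped(const AttStub *att, bool *isnull)
{
	if (att->attisdropped)
	{
		*isnull = true;		/* NULL result, not an error */
		return true;
	}
	return false;
}
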
@@ -590,7 +594,8 @@ ExecEvalParam(ExprState *exprstate, ExprContext *econtext,
else
{
/*
* All other parameter types must be sought in ecxt_param_list_info.
* All other parameter types must be sought in
* ecxt_param_list_info.
*/
ParamListInfo paramInfo;

@@ -964,7 +969,7 @@ ExecMakeFunctionResult(FuncExprState *fcache,
{
RegisterExprContextCallback(econtext,
ShutdownFuncExpr,
PointerGetDatum(fcache));
PointerGetDatum(fcache));
fcache->shutdown_reg = true;
}
}
@@ -1006,8 +1011,8 @@ ExecMakeFunctionResult(FuncExprState *fcache,
*
* We change the ExprState function pointer to use the simpler
* ExecMakeFunctionResultNoSets on subsequent calls. This amounts
* to assuming that no argument can return a set if it didn't do so
* the first time.
* to assuming that no argument can return a set if it didn't do
* so the first time.
*/
fcache->xprstate.evalfunc = (ExprStateEvalFunc) ExecMakeFunctionResultNoSets;

@@ -1098,7 +1103,7 @@ ExecMakeFunctionResultNoSets(FuncExprState *fcache,
}
}
}
/* fcinfo.isnull = false; */ /* handled by MemSet */
/* fcinfo.isnull = false; */ /* handled by MemSet */
result = FunctionCallInvoke(&fcinfo);
*isNull = fcinfo.isnull;

@@ -1273,9 +1278,9 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
break;

/*
* Can't do anything useful with NULL rowtype values. Currently
* we raise an error, but another alternative is to just ignore
* the result and "continue" to get another row.
* Can't do anything useful with NULL rowtype values.
* Currently we raise an error, but another alternative is to
* just ignore the result and "continue" to get another row.
*/
if (returnsTuple && fcinfo.isnull)
ereport(ERROR,
@@ -1293,13 +1298,14 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
{
/*
* Use the type info embedded in the rowtype Datum to
* look up the needed tupdesc. Make a copy for the query.
* look up the needed tupdesc. Make a copy for the
* query.
*/
HeapTupleHeader td;
HeapTupleHeader td;

td = DatumGetHeapTupleHeader(result);
tupdesc = lookup_rowtype_tupdesc(HeapTupleHeaderGetTypeId(td),
HeapTupleHeaderGetTypMod(td));
HeapTupleHeaderGetTypMod(td));
tupdesc = CreateTupleDescCopy(tupdesc);
}
else
@@ -1326,7 +1332,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
*/
if (returnsTuple)
{
HeapTupleHeader td;
HeapTupleHeader td;

td = DatumGetHeapTupleHeader(result);

@@ -1826,10 +1832,10 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
*isDone = ExprSingleResult;

/*
* If there's a test expression, we have to evaluate it and save
* the value where the CaseTestExpr placeholders can find it.
* We must save and restore prior setting of econtext's caseValue fields,
* in case this node is itself within a larger CASE.
* If there's a test expression, we have to evaluate it and save the
* value where the CaseTestExpr placeholders can find it. We must save
* and restore prior setting of econtext's caseValue fields, in case
* this node is itself within a larger CASE.
*/
save_datum = econtext->caseValue_datum;
save_isNull = econtext->caseValue_isNull;
@@ -1838,7 +1844,7 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
{
econtext->caseValue_datum = ExecEvalExpr(caseExpr->arg,
econtext,
&econtext->caseValue_isNull,
&econtext->caseValue_isNull,
NULL);
}

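The one-time downgrade the ExecMakeFunctionResult hunk rewraps — swap the evaluation function pointer to a cheaper variant once the first call proves no argument returns a set — is a self-modifying dispatch idiom. A reduced model with invented names:

struct EvalState;
typedef long (*EvalFunc) (struct EvalState *state);

typedef struct EvalState
{
	EvalFunc	evalfunc;	/* current dispatch target */
	long		value;
} EvalState;

static long
eval_no_sets(EvalState *state)
{
	return state->value;	/* cheap path: no set handling at all */
}

static long
eval_general(EvalState *state)
{
	/* ... full set-capable evaluation would happen here ... */

	/* No argument produced a set on this first call, so all later
	 * calls may take the cheap path: overwrite our own dispatch
	 * pointer, as the real code does with xprstate.evalfunc. */
	state->evalfunc = eval_no_sets;
	return state->value;
}
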
@@ -2009,7 +2015,7 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("cannot merge incompatible arrays"),
errdetail("Array with element type %s cannot be "
"included in ARRAY construct with element type %s.",
"included in ARRAY construct with element type %s.",
format_type_be(ARR_ELEMTYPE(array)),
format_type_be(element_type))));

@@ -2021,8 +2027,8 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
if (ndims <= 0 || ndims > MAXDIM)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("number of array dimensions (%d) exceeds " \
"the maximum allowed (%d)", ndims, MAXDIM)));
errmsg("number of array dimensions (%d) exceeds " \
"the maximum allowed (%d)", ndims, MAXDIM)));

elem_dims = (int *) palloc(elem_ndims * sizeof(int));
memcpy(elem_dims, ARR_DIMS(array), elem_ndims * sizeof(int));
@@ -2600,18 +2606,18 @@ ExecEvalFieldStore(FieldStoreState *fstate,

forboth(l1, fstate->newvals, l2, fstore->fieldnums)
{
ExprState *newval = (ExprState *) lfirst(l1);
AttrNumber fieldnum = lfirst_int(l2);
ExprState *newval = (ExprState *) lfirst(l1);
AttrNumber fieldnum = lfirst_int(l2);
bool eisnull;

Assert(fieldnum > 0 && fieldnum <= tupDesc->natts);

/*
* Use the CaseTestExpr mechanism to pass down the old value of the
* field being replaced; this is useful in case we have a nested field
* update situation. It's safe to reuse the CASE mechanism because
* there cannot be a CASE between here and where the value would be
* needed.
* Use the CaseTestExpr mechanism to pass down the old value of
* the field being replaced; this is useful in case we have a
* nested field update situation. It's safe to reuse the CASE
* mechanism because there cannot be a CASE between here and where
* the value would be needed.
*/
econtext->caseValue_datum = values[fieldnum - 1];
econtext->caseValue_isNull = (nulls[fieldnum - 1] == 'n');
@@ -2981,7 +2987,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_RowExpr:
{
RowExpr *rowexpr = (RowExpr *) node;
RowExpr *rowexpr = (RowExpr *) node;
RowExprState *rstate = makeNode(RowExprState);
Form_pg_attribute *attrs;
List *outlist = NIL;
@@ -3016,15 +3022,15 @@ ExecInitExpr(Expr *node, PlanState *parent)
/*
* Guard against ALTER COLUMN TYPE on rowtype
* since the RowExpr was created. XXX should we
* check typmod too? Not sure we can be sure it'll
* be the same.
* check typmod too? Not sure we can be sure
* it'll be the same.
*/
if (exprType((Node *) e) != attrs[i]->atttypid)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("ROW() column has type %s instead of type %s",
format_type_be(exprType((Node *) e)),
format_type_be(attrs[i]->atttypid))));
format_type_be(exprType((Node *) e)),
format_type_be(attrs[i]->atttypid))));
}
else
{
@@ -3111,7 +3117,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
TargetEntry *tle = (TargetEntry *) node;
GenericExprState *gstate = makeNode(GenericExprState);

gstate->xprstate.evalfunc = NULL; /* not used */
gstate->xprstate.evalfunc = NULL; /* not used */
gstate->arg = ExecInitExpr(tle->expr, parent);
state = (ExprState *) gstate;
}
@@ -3546,8 +3552,8 @@ ExecProject(ProjectionInfo *projInfo, ExprDoneCond *isDone)
/*
* store the tuple in the projection slot and return the slot.
*/
return ExecStoreTuple(newTuple, /* tuple to store */
slot, /* slot to store in */
InvalidBuffer, /* tuple has no buffer */
return ExecStoreTuple(newTuple, /* tuple to store */
slot, /* slot to store in */
InvalidBuffer, /* tuple has no buffer */
true);
}

@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.32 2004/08/29 04:12:31 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.33 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -224,8 +224,8 @@ tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, TupleDesc tupdesc
return false; /* tlist too long */

/*
* If the plan context requires a particular hasoid setting, then
* that has to match, too.
* If the plan context requires a particular hasoid setting, then that
* has to match, too.
*/
if (ExecContextForcesOids(ps, &hasoid) &&
hasoid != tupdesc->tdhasoid)

@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.81 2004/08/29 04:12:31 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.82 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -117,7 +117,7 @@


static TupleDesc ExecTypeFromTLInternal(List *targetList,
bool hasoid, bool skipjunk);
bool hasoid, bool skipjunk);


/* ----------------------------------------------------------------
@@ -149,7 +149,7 @@ ExecCreateTupleTable(int initialSize) /* initial number of slots in

/*
* Now allocate our new table along with space for the pointers to the
* tuples. Zero out the slots.
* tuples. Zero out the slots.
*/

newtable = (TupleTable) palloc(sizeof(TupleTableData));
@@ -568,10 +568,10 @@ ExecCleanTypeFromTL(List *targetList, bool hasoid)
static TupleDesc
ExecTypeFromTLInternal(List *targetList, bool hasoid, bool skipjunk)
{
TupleDesc typeInfo;
ListCell *l;
int len;
int cur_resno = 1;
TupleDesc typeInfo;
ListCell *l;
int len;
int cur_resno = 1;

if (skipjunk)
len = ExecCleanTargetListLength(targetList);
@@ -581,8 +581,8 @@ ExecTypeFromTLInternal(List *targetList, bool hasoid, bool skipjunk)

foreach(l, targetList)
{
TargetEntry *tle = lfirst(l);
Resdom *resdom = tle->resdom;
TargetEntry *tle = lfirst(l);
Resdom *resdom = tle->resdom;

if (skipjunk && resdom->resjunk)
continue;
@@ -605,16 +605,16 @@ ExecTypeFromTLInternal(List *targetList, bool hasoid, bool skipjunk)
TupleDesc
ExecTypeFromExprList(List *exprList)
{
TupleDesc typeInfo;
ListCell *l;
int cur_resno = 1;
TupleDesc typeInfo;
ListCell *l;
int cur_resno = 1;
char fldname[NAMEDATALEN];

typeInfo = CreateTemplateTupleDesc(list_length(exprList), false);

foreach(l, exprList)
{
Node *e = lfirst(l);
Node *e = lfirst(l);

sprintf(fldname, "f%d", cur_resno);


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.113 2004/08/29 04:12:31 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.114 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -179,7 +179,7 @@ CreateExecutorState(void)
*/
estate->es_direction = ForwardScanDirection;
estate->es_snapshot = SnapshotNow;
estate->es_crosscheck_snapshot = SnapshotAny; /* means no crosscheck */
estate->es_crosscheck_snapshot = SnapshotAny; /* means no crosscheck */
estate->es_range_table = NIL;

estate->es_result_relations = NULL;
@@ -248,7 +248,8 @@ FreeExecutorState(EState *estate)
*/
while (estate->es_exprcontexts)
{
/* XXX: seems there ought to be a faster way to implement this
/*
* XXX: seems there ought to be a faster way to implement this
* than repeated list_delete(), no?
*/
FreeExprContext((ExprContext *) linitial(estate->es_exprcontexts));
@@ -364,7 +365,7 @@ FreeExprContext(ExprContext *econtext)
* ReScanExprContext
*
* Reset an expression context in preparation for a rescan of its
* plan node. This requires calling any registered shutdown callbacks,
* plan node. This requires calling any registered shutdown callbacks,
* since any partially complete set-returning-functions must be canceled.
*
* Note we make no assumption about the caller's memory context.

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.85 2004/08/29 04:12:31 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.86 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -58,7 +58,7 @@ typedef struct local_es
*/
typedef struct
{
Oid *argtypes; /* resolved types of arguments */
Oid *argtypes; /* resolved types of arguments */
Oid rettype; /* actual return type */
int typlen; /* length of the return type */
bool typbyval; /* true if return type is pass by value */
@@ -94,7 +94,7 @@ init_execution_state(List *queryTree_list)
{
execution_state *firstes = NULL;
execution_state *preves = NULL;
ListCell *qtl_item;
ListCell *qtl_item;

foreach(qtl_item, queryTree_list)
{
@@ -180,8 +180,8 @@ init_sql_fcache(FmgrInfo *finfo)
typeStruct = (Form_pg_type) GETSTRUCT(typeTuple);

/*
* get the type length and by-value flag from the type tuple; also
* do a preliminary check for returnsTuple (this may prove inaccurate,
* get the type length and by-value flag from the type tuple; also do
* a preliminary check for returnsTuple (this may prove inaccurate,
* see below).
*/
fcache->typlen = typeStruct->typlen;
@@ -190,8 +190,8 @@ init_sql_fcache(FmgrInfo *finfo)
rettype == RECORDOID);

/*
* Parse and rewrite the queries. We need the argument type info to pass
* to the parser.
* Parse and rewrite the queries. We need the argument type info to
* pass to the parser.
*/
nargs = procedureStruct->pronargs;
haspolyarg = false;
@@ -240,11 +240,11 @@ init_sql_fcache(FmgrInfo *finfo)
* If the function has any arguments declared as polymorphic types,
* then it wasn't type-checked at definition time; must do so now.
*
* Also, force a type-check if the declared return type is a rowtype;
* we need to find out whether we are actually returning the whole
* tuple result, or just regurgitating a rowtype expression result.
* In the latter case we clear returnsTuple because we need not act
* different from the scalar result case.
* Also, force a type-check if the declared return type is a rowtype; we
* need to find out whether we are actually returning the whole tuple
* result, or just regurgitating a rowtype expression result. In the
* latter case we clear returnsTuple because we need not act different
* from the scalar result case.
*/
if (haspolyarg || fcache->returnsTuple)
fcache->returnsTuple = check_sql_fn_retval(rettype,
@@ -395,9 +395,9 @@ postquel_execute(execution_state *es,
* XXX do we need to remove junk attrs from the result tuple?
* Probably OK to leave them, as long as they are at the end.
*/
HeapTupleHeader dtup;
Oid dtuptype;
int32 dtuptypmod;
HeapTupleHeader dtup;
Oid dtuptype;
int32 dtuptypmod;

dtup = (HeapTupleHeader) palloc(tup->t_len);
memcpy((char *) dtup, (char *) tup->t_data, tup->t_len);
@@ -433,8 +433,8 @@ postquel_execute(execution_state *es,
else
{
/*
* Returning a scalar, which we have to extract from the
* first column of the SELECT result, and then copy into current
* Returning a scalar, which we have to extract from the first
* column of the SELECT result, and then copy into current
* execution context if needed.
*/
value = heap_getattr(tup, 1, tupDesc, &(fcinfo->isnull));
@@ -635,7 +635,8 @@ sql_exec_error_callback(void *arg)
fn_name = NameStr(functup->proname);

/*
* If there is a syntax error position, convert to internal syntax error
* If there is a syntax error position, convert to internal syntax
* error
*/
syntaxerrposition = geterrposition();
if (syntaxerrposition > 0)

@@ -45,7 +45,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.124 2004/08/29 04:12:31 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.125 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -252,11 +252,11 @@ initialize_aggregates(AggState *aggstate,
}

/*
* If we are reinitializing after a group boundary, we have to free
* any prior transValue to avoid memory leakage. We must check not
* only the isnull flag but whether the pointer is NULL; since
* pergroupstate is initialized with palloc0, the initial condition
* has isnull = 0 and null pointer.
* If we are reinitializing after a group boundary, we have to
* free any prior transValue to avoid memory leakage. We must
* check not only the isnull flag but whether the pointer is NULL;
* since pergroupstate is initialized with palloc0, the initial
* condition has isnull = 0 and null pointer.
*/
if (!peraggstate->transtypeByVal &&
!pergroupstate->transValueIsNull &&
@@ -811,14 +811,14 @@ agg_retrieve_direct(AggState *aggstate)
/*
* If we have no first tuple (ie, the outerPlan didn't return
* anything), create a dummy all-nulls input tuple for use by
* ExecQual/ExecProject. 99.44% of the time this is a waste of cycles,
* because ordinarily the projected output tuple's targetlist
* cannot contain any direct (non-aggregated) references to input
* columns, so the dummy tuple will not be referenced. However
* there are special cases where this isn't so --- in particular
* an UPDATE involving an aggregate will have a targetlist
* reference to ctid. We need to return a null for ctid in that
* situation, not coredump.
* ExecQual/ExecProject. 99.44% of the time this is a waste of
* cycles, because ordinarily the projected output tuple's
* targetlist cannot contain any direct (non-aggregated)
* references to input columns, so the dummy tuple will not be
* referenced. However there are special cases where this isn't so
* --- in particular an UPDATE involving an aggregate will have a
* targetlist reference to ctid. We need to return a null for
* ctid in that situation, not coredump.
*
* The values returned for the aggregates will be the initial values
* of the transition functions.
@@ -865,9 +865,9 @@ agg_retrieve_direct(AggState *aggstate)
if (ExecQual(aggstate->ss.ps.qual, econtext, false))
{
/*
* Form and return a projection tuple using the aggregate results
* and the representative input tuple. Note we do not support
* aggregates returning sets ...
* Form and return a projection tuple using the aggregate
* results and the representative input tuple. Note we do not
* support aggregates returning sets ...
*/
return ExecProject(projInfo, NULL);
}
@@ -1009,9 +1009,9 @@ agg_retrieve_hash_table(AggState *aggstate)
if (ExecQual(aggstate->ss.ps.qual, econtext, false))
{
/*
* Form and return a projection tuple using the aggregate results
* and the representative input tuple. Note we do not support
* aggregates returning sets ...
* Form and return a projection tuple using the aggregate
* results and the representative input tuple. Note we do not
* support aggregates returning sets ...
*/
return ExecProject(projInfo, NULL);
}
@@ -1478,7 +1478,10 @@ ExecReScanAgg(AggState *node, ExprContext *exprCtxt)
}
else
{
/* Reset the per-group state (in particular, mark transvalues null) */
/*
* Reset the per-group state (in particular, mark transvalues
* null)
*/
MemSet(node->pergroup, 0,
sizeof(AggStatePerGroupData) * node->numaggs);
}

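The double test in the initialize_aggregates hunk — both the isnull flag and the pointer are checked, because palloc0 leaves isnull = 0 alongside a NULL pointer — looks like this in isolation. free() stands in for pfree(), and the struct is a stand-in for AggStatePerGroupData:

#include <stdbool.h>
#include <stdlib.h>

typedef struct
{
	void   *transValue;
	bool	transValueIsNull;
} PerGroupStub;

static void
free_prior_transvalue(PerGroupStub *pergroup, bool transtypeByVal)
{
	/* by-value transition values own no separate storage */
	if (!transtypeByVal &&
		!pergroup->transValueIsNull &&
		pergroup->transValue != NULL)	/* palloc0 initial state: both zero */
	{
		free(pergroup->transValue);		/* pfree() in the real code */
		pergroup->transValue = NULL;
	}
}
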
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.63 2004/08/29 04:12:31 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.64 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -559,7 +559,7 @@ ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
if (nread != sizeof(HeapTupleData))
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not read from hash-join temporary file: %m")));
errmsg("could not read from hash-join temporary file: %m")));
heapTuple = palloc(HEAPTUPLESIZE + htup.t_len);
memcpy((char *) heapTuple, (char *) &htup, sizeof(HeapTupleData));
heapTuple->t_datamcxt = CurrentMemoryContext;
@@ -569,7 +569,7 @@ ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
if (nread != (size_t) htup.t_len)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not read from hash-join temporary file: %m")));
errmsg("could not read from hash-join temporary file: %m")));
return ExecStoreTuple(heapTuple, tupleSlot, InvalidBuffer, true);
}

@@ -627,14 +627,14 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
if (BufFileSeek(hashtable->outerBatchFile[newbatch - 1], 0, 0L, SEEK_SET))
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not rewind hash-join temporary file: %m")));
errmsg("could not rewind hash-join temporary file: %m")));

innerFile = hashtable->innerBatchFile[newbatch - 1];

if (BufFileSeek(innerFile, 0, 0L, SEEK_SET))
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not rewind hash-join temporary file: %m")));
errmsg("could not rewind hash-join temporary file: %m")));

/*
* Reload the hash table with the new inner batch
@@ -685,12 +685,12 @@ ExecHashJoinSaveTuple(HeapTuple heapTuple,
if (written != sizeof(HeapTupleData))
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not write to hash-join temporary file: %m")));
errmsg("could not write to hash-join temporary file: %m")));
written = BufFileWrite(file, (void *) heapTuple->t_data, heapTuple->t_len);
if (written != (size_t) heapTuple->t_len)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not write to hash-join temporary file: %m")));
errmsg("could not write to hash-join temporary file: %m")));
}

void

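The hunks above rewrap a header-then-body protocol for spilling tuples to batch files: write the fixed-size header, then t_len bytes of body, verifying both byte counts; reading mirrors it. A stdio sketch under those assumptions — fwrite/fread and stderr replace BufFileWrite/BufFileRead and ereport, and the header struct is a stand-in for HeapTupleData:

#include <stdio.h>
#include <stdlib.h>

typedef struct { size_t t_len; /* ... other header fields ... */ } TupleHdr;

static void
save_tuple(FILE *file, const TupleHdr *hdr, const void *body)
{
	if (fwrite(hdr, sizeof(TupleHdr), 1, file) != 1 ||
		fwrite(body, 1, hdr->t_len, file) != hdr->t_len)
	{
		perror("could not write to temporary file");
		exit(EXIT_FAILURE);
	}
}

static void *
load_tuple(FILE *file, TupleHdr *hdr)
{
	void   *body;

	if (fread(hdr, sizeof(TupleHdr), 1, file) != 1)
		return NULL;						/* clean EOF between tuples */
	body = malloc(hdr->t_len);				/* palloc() in the real code */
	if (body == NULL ||
		fread(body, 1, hdr->t_len, file) != hdr->t_len)
	{
		perror("could not read from temporary file");
		exit(EXIT_FAILURE);
	}
	return body;
}
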
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.96 2004/08/29 04:12:31 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.97 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -38,7 +38,7 @@
* In a multiple-index plan, we must take care to return any given tuple
* only once, even if it matches conditions of several index scans. Our
* preferred way to do this is to record already-returned tuples in a hash
* table (using the TID as unique identifier). However, in a very large
* table (using the TID as unique identifier). However, in a very large
* scan this could conceivably run out of memory. We limit the hash table
* to no more than work_mem KB; if it grows past that, we fall back to the
* pre-7.4 technique: evaluate the prior-scan index quals again for each
@@ -129,11 +129,11 @@ IndexNext(IndexScanState *node)
scanrelid = ((IndexScan *) node->ss.ps.plan)->scan.scanrelid;

/*
* Clear any reference to the previously returned tuple. The idea here
* is to not have the tuple slot be the last holder of a pin on that
* tuple's buffer; if it is, we'll need a separate visit to the bufmgr
* to release the buffer. By clearing here, we get to have the release
* done by ReleaseAndReadBuffer inside index_getnext.
* Clear any reference to the previously returned tuple. The idea
* here is to not have the tuple slot be the last holder of a pin on
* that tuple's buffer; if it is, we'll need a separate visit to the
* bufmgr to release the buffer. By clearing here, we get to have the
* release done by ReleaseAndReadBuffer inside index_getnext.
*/
ExecClearTuple(slot);

@@ -215,8 +215,9 @@ IndexNext(IndexScanState *node)
false); /* don't pfree */

/*
* If any of the index operators involved in this scan are lossy,
* recheck them by evaluating the original operator clauses.
* If any of the index operators involved in this scan are
* lossy, recheck them by evaluating the original operator
* clauses.
*/
if (lossyQual)
{
@@ -224,15 +225,19 @@ IndexNext(IndexScanState *node)
ResetExprContext(econtext);
if (!ExecQual(lossyQual, econtext, false))
{
/* Fails lossy op, so drop it and loop back for another */
/*
* Fails lossy op, so drop it and loop back for
* another
*/
ExecClearTuple(slot);
continue;
}
}

/*
* If it's a multiple-index scan, make sure not to double-report
* a tuple matched by more than one index. (See notes above.)
* If it's a multiple-index scan, make sure not to
* double-report a tuple matched by more than one index. (See
* notes above.)
*/
if (numIndices > 1)
{
@@ -240,7 +245,7 @@ IndexNext(IndexScanState *node)
if (node->iss_DupHash)
{
DupHashTabEntry *entry;
bool found;
bool found;

entry = (DupHashTabEntry *)
hash_search(node->iss_DupHash,
@@ -248,7 +253,7 @@ IndexNext(IndexScanState *node)
HASH_ENTER,
&found);
if (entry == NULL ||
node->iss_DupHash->hctl->nentries > node->iss_MaxHash)
node->iss_DupHash->hctl->nentries > node->iss_MaxHash)
{
/* out of memory (either hard or soft limit) */
/* release hash table and fall thru to old code */
@@ -679,10 +684,11 @@ ExecInitIndexScan(IndexScan *node, EState *estate)
* initialize child expressions
*
* Note: we don't initialize all of the indxqual expression, only the
* sub-parts corresponding to runtime keys (see below). The indxqualorig
* expression is always initialized even though it will only be used in
* some uncommon cases --- would be nice to improve that. (Problem is
* that any SubPlans present in the expression must be found now...)
* sub-parts corresponding to runtime keys (see below). The
* indxqualorig expression is always initialized even though it will
* only be used in some uncommon cases --- would be nice to improve
* that. (Problem is that any SubPlans present in the expression must
* be found now...)
*/
indexstate->ss.ps.targetlist = (List *)
ExecInitExpr((Expr *) node->scan.plan.targetlist,
@@ -788,14 +794,14 @@ ExecInitIndexScan(IndexScan *node, EState *estate)
lossyflag_cell = list_head(lossyflags);
for (j = 0; j < n_keys; j++)
{
OpExpr *clause; /* one clause of index qual */
Expr *leftop; /* expr on lhs of operator */
Expr *rightop; /* expr on rhs ... */
OpExpr *clause; /* one clause of index qual */
Expr *leftop; /* expr on lhs of operator */
Expr *rightop; /* expr on rhs ... */
int flags = 0;
AttrNumber varattno; /* att number used in scan */
StrategyNumber strategy; /* op's strategy number */
Oid subtype; /* op's strategy subtype */
int lossy; /* op's recheck flag */
Oid subtype; /* op's strategy subtype */
int lossy; /* op's recheck flag */
RegProcedure opfuncid; /* operator proc id used in scan */
Datum scanvalue; /* value used in scan (if const) */

@@ -819,15 +825,16 @@ ExecInitIndexScan(IndexScan *node, EState *estate)
/*
* Here we figure out the contents of the index qual. The
* usual case is (var op const) which means we form a scan key
* for the attribute listed in the var node and use the value of
* the const as comparison data.
* for the attribute listed in the var node and use the value
* of the const as comparison data.
*
* If we don't have a const node, it means our scan key is a
* function of information obtained during the execution of the
* plan, in which case we need to recalculate the index scan key
* at run time. Hence, we set have_runtime_keys to true and place
* the appropriate subexpression in run_keys. The corresponding
* scan key values are recomputed at run time.
* function of information obtained during the execution of
* the plan, in which case we need to recalculate the index
* scan key at run time. Hence, we set have_runtime_keys to
* true and place the appropriate subexpression in run_keys.
* The corresponding scan key values are recomputed at run
* time.
*/
run_keys[j] = NULL;

@@ -892,18 +899,18 @@ ExecInitIndexScan(IndexScan *node, EState *estate)
scanvalue); /* constant */

/*
* If this operator is lossy, add its indxqualorig
* expression to the list of quals to recheck. The
* list_nth() calls here could be avoided by chasing the
* lists in parallel to all the other lists, but since
* lossy operators are very uncommon, it's probably a
* waste of time to do so.
* If this operator is lossy, add its indxqualorig expression
* to the list of quals to recheck. The list_nth() calls here
* could be avoided by chasing the lists in parallel to all
* the other lists, but since lossy operators are very
* uncommon, it's probably a waste of time to do so.
*/
if (lossy)
{
List *qualOrig = indexstate->indxqualorig;
List *qualOrig = indexstate->indxqualorig;

lossyQuals[i] = lappend(lossyQuals[i],
list_nth((List *) list_nth(qualOrig, i), j));
list_nth((List *) list_nth(qualOrig, i), j));
}
}

@@ -1037,7 +1044,7 @@ create_duphash(IndexScanState *node)
node->iss_DupHash = hash_create("DupHashTable",
nbuckets,
&hash_ctl,
HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
if (node->iss_DupHash == NULL)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.67 2004/08/29 04:12:31 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.68 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -104,10 +104,10 @@ static void
MJFormSkipQuals(List *qualList, List **ltQuals, List **gtQuals,
PlanState *parent)
{
List *ltexprs,
*gtexprs;
ListCell *ltcdr,
*gtcdr;
List *ltexprs,
*gtexprs;
ListCell *ltcdr,
*gtcdr;

/*
* Make modifiable copies of the qualList.

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.49 2004/08/29 04:12:31 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.50 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -62,11 +62,11 @@ SeqNext(SeqScanState *node)
slot = node->ss_ScanTupleSlot;

/*
* Clear any reference to the previously returned tuple. The idea here
* is to not have the tuple slot be the last holder of a pin on that
* tuple's buffer; if it is, we'll need a separate visit to the bufmgr
* to release the buffer. By clearing here, we get to have the release
* done by ReleaseAndReadBuffer inside heap_getnext.
* Clear any reference to the previously returned tuple. The idea
* here is to not have the tuple slot be the last holder of a pin on
* that tuple's buffer; if it is, we'll need a separate visit to the
* bufmgr to release the buffer. By clearing here, we get to have the
* release done by ReleaseAndReadBuffer inside heap_getnext.
*/
ExecClearTuple(slot);


@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.64 2004/08/29 04:12:31 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.65 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -912,7 +912,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
SubLinkType subLinkType = subplan->subLinkType;
MemoryContext oldcontext;
TupleTableSlot *slot;
ListCell *l;
ListCell *l;
bool found = false;
ArrayBuildState *astate = NULL;


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.43 2004/08/29 04:12:31 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.44 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -109,8 +109,9 @@ ExecUnique(UniqueState *node)
* he next calls us.
*
* tgl 3/2004: the above concern is no longer valid; junkfilters used to
* modify their input's return slot but don't anymore, and I don't think
* anyplace else does either. Not worth changing this code though.
* modify their input's return slot but don't anymore, and I don't
* think anyplace else does either. Not worth changing this code
* though.
*/
if (node->priorTuple != NULL)
heap_freetuple(node->priorTuple);

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.124 2004/08/29 04:12:31 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.125 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,17 +29,17 @@ int SPI_result;

static _SPI_connection *_SPI_stack = NULL;
static _SPI_connection *_SPI_current = NULL;
static int _SPI_stack_depth = 0; /* allocated size of _SPI_stack */
static int _SPI_stack_depth = 0; /* allocated size of _SPI_stack */
static int _SPI_connected = -1;
static int _SPI_curid = -1;

static int _SPI_execute(const char *src, int tcount, _SPI_plan *plan);
static int _SPI_pquery(QueryDesc *queryDesc, bool runit,
bool useCurrentSnapshot, int tcount);
static int _SPI_pquery(QueryDesc *queryDesc, bool runit,
bool useCurrentSnapshot, int tcount);

static int _SPI_execute_plan(_SPI_plan *plan,
Datum *Values, const char *Nulls,
bool useCurrentSnapshot, int tcount);
Datum *Values, const char *Nulls,
bool useCurrentSnapshot, int tcount);

static void _SPI_error_callback(void *arg);

@@ -60,7 +60,7 @@ static bool _SPI_checktuples(void);
int
SPI_connect(void)
{
int newdepth;
int newdepth;

/*
* When procedure called by Executor _SPI_curid expected to be equal
@@ -107,9 +107,9 @@ SPI_connect(void)
/*
* Create memory contexts for this procedure
*
* XXX it would be better to use PortalContext as the parent context,
* but we may not be inside a portal (consider deferred-trigger
* execution). Perhaps CurTransactionContext would do? For now it
* XXX it would be better to use PortalContext as the parent context, but
* we may not be inside a portal (consider deferred-trigger
* execution). Perhaps CurTransactionContext would do? For now it
* doesn't matter because we clean up explicitly in AtEOSubXact_SPI().
*/
_SPI_current->procCxt = AllocSetContextCreate(TopTransactionContext,
@@ -201,7 +201,7 @@ AtEOXact_SPI(bool isCommit)
void
AtEOSubXact_SPI(bool isCommit, TransactionId childXid)
{
bool found = false;
bool found = false;

while (_SPI_connected >= 0)
{
@@ -213,10 +213,10 @@ AtEOSubXact_SPI(bool isCommit, TransactionId childXid)
found = true;

/*
* Pop the stack entry and reset global variables. Unlike
* Pop the stack entry and reset global variables. Unlike
* SPI_finish(), we don't risk switching to memory contexts that
* might be already gone, or deleting memory contexts that have been
* or will be thrown away anyway.
* might be already gone, or deleting memory contexts that have
* been or will be thrown away anyway.
*/
_SPI_connected--;
_SPI_curid = _SPI_connected;
@@ -418,7 +418,7 @@ HeapTupleHeader
SPI_returntuple(HeapTuple tuple, TupleDesc tupdesc)
{
MemoryContext oldcxt = NULL;
HeapTupleHeader dtup;
HeapTupleHeader dtup;

if (tuple == NULL || tupdesc == NULL)
{
@@ -936,7 +936,7 @@ SPI_cursor_close(Portal portal)
Oid
SPI_getargtypeid(void *plan, int argIndex)
{
if (plan == NULL || argIndex < 0 || argIndex >= ((_SPI_plan*)plan)->nargs)
if (plan == NULL || argIndex < 0 || argIndex >= ((_SPI_plan *) plan)->nargs)
{
SPI_result = SPI_ERROR_ARGUMENT;
return InvalidOid;
@@ -965,13 +965,13 @@ SPI_getargcount(void *plan)
* if the command can be used with SPI_cursor_open
*
* Parameters
* plan A plan previously prepared using SPI_prepare
* plan A plan previously prepared using SPI_prepare
*/
bool
SPI_is_cursor_plan(void *plan)
{
_SPI_plan *spiplan = (_SPI_plan *) plan;
List *qtlist;
_SPI_plan *spiplan = (_SPI_plan *) plan;
List *qtlist;

if (spiplan == NULL)
{
@@ -982,7 +982,7 @@ SPI_is_cursor_plan(void *plan)
qtlist = spiplan->qtlist;
if (list_length(spiplan->ptlist) == 1 && list_length(qtlist) == 1)
{
Query *queryTree = (Query *) linitial((List *) linitial(qtlist));
Query *queryTree = (Query *) linitial((List *) linitial(qtlist));

if (queryTree->commandType == CMD_SELECT && queryTree->into == NULL)
return true;
@@ -993,7 +993,7 @@ SPI_is_cursor_plan(void *plan)
/*
* SPI_result_code_string --- convert any SPI return code to a string
*
* This is often useful in error messages. Most callers will probably
* This is often useful in error messages. Most callers will probably
* only pass negative (error-case) codes, but for generality we recognize
* the success codes too.
*/
@@ -1483,8 +1483,8 @@ _SPI_error_callback(void *arg)
int syntaxerrposition;

/*
* If there is a syntax error position, convert to internal syntax error;
* otherwise treat the query as an item of context stack
* If there is a syntax error position, convert to internal syntax
* error; otherwise treat the query as an item of context stack
*/
syntaxerrposition = geterrposition();
if (syntaxerrposition > 0)
@@ -1632,7 +1632,8 @@ _SPI_copy_plan(_SPI_plan *plan, int location)
parentcxt = _SPI_current->procCxt;
else if (location == _SPI_CPLAN_TOPCXT)
parentcxt = TopMemoryContext;
else /* (this case not currently used) */
else
/* (this case not currently used) */
parentcxt = CurrentMemoryContext;

/*

@@ -9,7 +9,7 @@
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/lib/stringinfo.c,v 1.39 2004/08/29 04:12:32 momjian Exp $
* $PostgreSQL: pgsql/src/backend/lib/stringinfo.c,v 1.40 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -223,7 +223,7 @@ enlargeStringInfo(StringInfo str, int needed)

/*
* Guard against ridiculous "needed" values, which can occur if we're
* fed bogus data. Without this, we can get an overflow or infinite
* fed bogus data. Without this, we can get an overflow or infinite
* loop in the following.
*/
if (needed < 0 ||
@@ -249,9 +249,9 @@ enlargeStringInfo(StringInfo str, int needed)
newlen = 2 * newlen;

/*
* Clamp to MaxAllocSize in case we went past it. Note we are assuming
* here that MaxAllocSize <= INT_MAX/2, else the above loop could
* overflow. We will still have newlen >= needed.
* Clamp to MaxAllocSize in case we went past it. Note we are
* assuming here that MaxAllocSize <= INT_MAX/2, else the above loop
* could overflow. We will still have newlen >= needed.
*/
if (newlen > (int) MaxAllocSize)
newlen = (int) MaxAllocSize;

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.117 2004/08/29 04:12:32 momjian Exp $
* $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.118 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -387,7 +387,7 @@ auth_failed(Port *port, int status)
errstr = gettext_noop("PAM authentication failed for user \"%s\"");
break;
#endif /* USE_PAM */
default :
default:
errstr = gettext_noop("Unknown auth method: authentication failed for user \"%s\"");
break;
}
@@ -473,6 +473,7 @@ ClientAuthentication(Port *port)
break;

case uaIdent:

/*
* If we are doing ident on unix-domain sockets, use SCM_CREDS
* only if it is defined and SO_PEERCRED isn't.
@@ -483,6 +484,7 @@ ClientAuthentication(Port *port)
if (port->raddr.addr.ss_family == AF_UNIX)
{
#if defined(HAVE_STRUCT_FCRED) || defined(HAVE_STRUCT_SOCKCRED)

/*
* Receive credentials on next message receipt, BSD/OS,
* NetBSD. We need to set this before the client sends the
@@ -493,7 +495,7 @@ ClientAuthentication(Port *port)
if (setsockopt(port->sock, 0, LOCAL_CREDS, &on, sizeof(on)) < 0)
ereport(FATAL,
(errcode_for_socket_access(),
errmsg("could not enable credential reception: %m")));
errmsg("could not enable credential reception: %m")));
#endif

sendAuthRequest(port, AUTH_REQ_SCM_CREDS);
@@ -770,8 +772,8 @@ recv_password_packet(Port *port)
if (mtype != EOF)
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("expected password response, got message type %d",
mtype)));
errmsg("expected password response, got message type %d",
mtype)));
return NULL; /* EOF or bad message type */
}
}

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/libpq/be-fsstubs.c,v 1.73 2004/08/29 04:12:32 momjian Exp $
* $PostgreSQL: pgsql/src/backend/libpq/be-fsstubs.c,v 1.74 2004/08/29 05:06:43 momjian Exp $
*
* NOTES
* This should be moved to a more appropriate place. It is here
@@ -507,8 +507,8 @@ AtEOXact_LargeObject(bool isCommit)
currentContext = MemoryContextSwitchTo(fscxt);

/*
* Close LO fds and clear cookies array so that LO fds are no longer good.
* On abort we skip the close step.
* Close LO fds and clear cookies array so that LO fds are no longer
* good. On abort we skip the close step.
*/
for (i = 0; i < cookies_size; i++)
{
@@ -536,7 +536,7 @@ AtEOXact_LargeObject(bool isCommit)

/*
* AtEOSubXact_LargeObject
* Take care of large objects at subtransaction commit/abort
* Take care of large objects at subtransaction commit/abort
*
* Reassign LOs created/opened during a committing subtransaction
* to the parent transaction. On abort, just close them.
@@ -545,7 +545,7 @@ void
AtEOSubXact_LargeObject(bool isCommit, TransactionId myXid,
TransactionId parentXid)
{
int i;
int i;

if (fscxt == NULL) /* no LO operations in this xact */
return;
@@ -561,8 +561,8 @@ AtEOSubXact_LargeObject(bool isCommit, TransactionId myXid,
else
{
/*
* Make sure we do not call inv_close twice if it errors out
* for some reason. Better a leak than a crash.
* Make sure we do not call inv_close twice if it errors
* out for some reason. Better a leak than a crash.
*/
deleteLOfd(i);
inv_close(lo);

@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.47 2004/08/29 04:12:32 momjian Exp $
* $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.48 2004/08/29 05:06:43 momjian Exp $
*
* Since the server static private key ($DataDir/server.key)
* will normally be stored unencrypted so that the database
@@ -294,7 +294,7 @@ rloop:
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("unrecognized SSL error code %d",
SSL_get_error(port->ssl, n))));
SSL_get_error(port->ssl, n))));
n = -1;
break;
}
@@ -379,7 +379,7 @@ wloop:
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("unrecognized SSL error code %d",
SSL_get_error(port->ssl, n))));
SSL_get_error(port->ssl, n))));
n = -1;
break;
}
@@ -546,8 +546,8 @@ tmp_dh_cb(SSL *s, int is_export, int keylength)
if (r == NULL || 8 * DH_size(r) < keylength)
{
ereport(DEBUG2,
(errmsg_internal("DH: generating parameters (%d bits)....",
keylength)));
(errmsg_internal("DH: generating parameters (%d bits)....",
keylength)));
r = DH_generate_parameters(keylength, DH_GENERATOR_2, NULL, NULL);
}

@@ -651,13 +651,13 @@ initialize_SSL(void)
errmsg("could not access private key file \"%s\": %m",
fnbuf)));

/*
/*
* Require no public access to key file.
*
* XXX temporarily suppress check when on Windows, because there may
* not be proper support for Unix-y file permissions. Need to think
* of a reasonable check to apply on Windows. (See also the data
* directory permission check in postmaster.c)
* not be proper support for Unix-y file permissions. Need to
* think of a reasonable check to apply on Windows. (See also the
* data directory permission check in postmaster.c)
*/
#if !defined(__CYGWIN__) && !defined(WIN32)
if (!S_ISREG(buf.st_mode) || (buf.st_mode & (S_IRWXG | S_IRWXO)) ||

@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.128 2004/08/29 04:12:32 momjian Exp $
* $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.129 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,10 +41,10 @@
/* Max size of username ident server can return */
#define IDENT_USERNAME_MAX 512

/* Standard TCP port number for Ident service. Assigned by IANA */
/* Standard TCP port number for Ident service. Assigned by IANA */
#define IDENT_PORT 113

/* Name of the config file */
/* Name of the config file */
#define CONF_FILE "pg_hba.conf"

/* Name of the usermap file */
@@ -66,17 +66,20 @@
*/

/* pre-parsed content of CONF_FILE and corresponding line #s */
static List *hba_lines = NIL;
static List *hba_line_nums = NIL;
static List *hba_lines = NIL;
static List *hba_line_nums = NIL;

/* pre-parsed content of USERMAP_FILE and corresponding line #s */
static List *ident_lines = NIL;
static List *ident_line_nums = NIL;
static List *ident_lines = NIL;
static List *ident_line_nums = NIL;

/* pre-parsed content of group file and corresponding line #s */
static List *group_lines = NIL;
static List *group_line_nums = NIL;
static List *group_lines = NIL;
static List *group_line_nums = NIL;

/* pre-parsed content of user passwd file and corresponding line #s */
static List *user_lines = NIL;
static List *user_line_nums = NIL;
static List *user_lines = NIL;
static List *user_line_nums = NIL;

/* sorted entries so we can do binary search lookups */
static List **user_sorted = NULL; /* sorted user list, for bsearch() */
@@ -119,7 +122,7 @@ next_token(FILE *fp, char *buf, int bufsz)
char *end_buf = buf + (bufsz - 2);
bool in_quote = false;
bool was_quote = false;
bool saw_quote = false;
bool saw_quote = false;

Assert(end_buf > start_buf);

@@ -134,8 +137,8 @@ next_token(FILE *fp, char *buf, int bufsz)
}

/*
* Build a token in buf of next characters up to EOF, EOL,
* unquoted comma, or unquoted whitespace.
* Build a token in buf of next characters up to EOF, EOL, unquoted
* comma, or unquoted whitespace.
*/
while (c != EOF && c != '\n' &&
(!pg_isblank(c) || in_quote == true))
@@ -156,8 +159,8 @@ next_token(FILE *fp, char *buf, int bufsz)
*buf = '\0';
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("authentication file token too long, skipping: \"%s\"",
start_buf)));
errmsg("authentication file token too long, skipping: \"%s\"",
start_buf)));
/* Discard remainder of line */
while ((c = getc(fp)) != EOF && c != '\n')
;
@@ -195,10 +198,10 @@ next_token(FILE *fp, char *buf, int bufsz)

*buf = '\0';

if (!saw_quote &&
(strcmp(start_buf, "all") == 0 ||
strcmp(start_buf, "sameuser") == 0 ||
strcmp(start_buf, "samegroup") == 0))
if (!saw_quote &&
(strcmp(start_buf, "all") == 0 ||
strcmp(start_buf, "sameuser") == 0 ||
strcmp(start_buf, "samegroup") == 0))
{
/* append newline to a magical keyword */
*buf++ = '\n';
@@ -270,10 +273,10 @@ free_lines(List **lines, List **line_nums)
if (*lines)
{
/*
* "lines" is a list of lists; each of those sublists consists
* of palloc'ed tokens, so we want to free each pointed-to
* token in a sublist, followed by the sublist itself, and
* finally the whole list.
* "lines" is a list of lists; each of those sublists consists of
* palloc'ed tokens, so we want to free each pointed-to token in a
* sublist, followed by the sublist itself, and finally the whole
* list.
*/
ListCell *line;

@@ -338,8 +341,8 @@ tokenize_inc_file(const char *inc_filename)
/* Create comma-separate string from List */
foreach(line, inc_lines)
{
List *token_list = (List *) lfirst(line);
ListCell *token;
List *token_list = (List *) lfirst(line);
ListCell *token;

foreach(token, token_list)
{
@@ -455,7 +458,7 @@ get_group_line(const char *group)
/*
* Lookup a user name in the pg_shadow file
*/
List **
List **
get_user_line(const char *user)
{
/* On some versions of Solaris, bsearch of zero items dumps core */
@@ -480,7 +483,7 @@ check_group(char *group, char *user)

if ((line = get_group_line(group)) != NULL)
{
ListCell *line_item;
ListCell *line_item;

/* skip over the group name */
for_each_cell(line_item, lnext(list_head(*line)))
@@ -792,9 +795,9 @@ parse_hba(List *line, int line_num, hbaPort *port,
if (addr.ss_family != port->raddr.addr.ss_family)
{
/*
* Wrong address family. We allow only one case: if the
* file has IPv4 and the port is IPv6, promote the file
* address to IPv6 and try to match that way.
* Wrong address family. We allow only one case: if the file
* has IPv4 and the port is IPv6, promote the file address to
* IPv6 and try to match that way.
*/
#ifdef HAVE_IPV6
if (addr.ss_family == AF_INET &&
@@ -804,7 +807,7 @@ parse_hba(List *line, int line_num, hbaPort *port,
promote_v4_to_v6_mask(&mask);
}
else
#endif /* HAVE_IPV6 */
#endif /* HAVE_IPV6 */
{
/* Line doesn't match client port, so ignore it. */
return;
@@ -846,8 +849,8 @@ hba_syntax:
else
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("missing field in pg_hba.conf file at end of line %d",
line_num)));
errmsg("missing field in pg_hba.conf file at end of line %d",
line_num)));

/* Come here if suitable message already logged */
hba_other_error:
@@ -1041,7 +1044,8 @@ load_hba(void)
conf_file = pstrdup(guc_hbafile);
else
{
char *confloc = (user_pgconfig_is_dir) ? user_pgconfig : DataDir;
char *confloc = (user_pgconfig_is_dir) ? user_pgconfig : DataDir;

/* put together the full pathname to the config file */
conf_file = palloc(strlen(confloc) + strlen(CONF_FILE) + 2);
sprintf(conf_file, "%s/%s", confloc, CONF_FILE);
@@ -1160,7 +1164,8 @@ check_ident_usermap(const char *usermap_name,
}
else
{
ListCell *line_cell, *num_cell;
ListCell *line_cell,
*num_cell;

forboth(line_cell, ident_lines, num_cell, ident_line_nums)
{
@@ -1184,6 +1189,7 @@ load_ident(void)
FILE *file; /* The map file we have to read */
char *map_file; /* The name of the map file we have to
* read */

if (ident_lines || ident_line_nums)
free_lines(&ident_lines, &ident_line_nums);

@@ -1193,11 +1199,12 @@ load_ident(void)
else
{
/* put together the full pathname to the map file */
char *confloc = (user_pgconfig_is_dir) ? user_pgconfig : DataDir;
char *confloc = (user_pgconfig_is_dir) ? user_pgconfig : DataDir;

map_file = (char *) palloc(strlen(confloc) + strlen(USERMAP_FILE) + 2);
sprintf(map_file, "%s/%s", confloc, USERMAP_FILE);
}


file = AllocateFile(map_file, "r");
if (file == NULL)
{
@@ -1225,7 +1232,7 @@ static bool
interpret_ident_response(const char *ident_response,
char *ident_user)
{
const char *cursor = ident_response; /* Cursor into
const char *cursor = ident_response; /* Cursor into
* *ident_response */

/*
@@ -1353,7 +1360,8 @@ ident_inet(const SockAddr remote_addr,
hints.ai_addr = NULL;
hints.ai_next = NULL;
rc = getaddrinfo_all(remote_addr_s, ident_port, &hints, &ident_serv);
if (rc || !ident_serv) {
if (rc || !ident_serv)
{
if (ident_serv)
freeaddrinfo_all(hints.ai_family, ident_serv);
return false; /* we don't expect this to happen */
@@ -1368,7 +1376,8 @@ ident_inet(const SockAddr remote_addr,
hints.ai_addr = NULL;
hints.ai_next = NULL;
rc = getaddrinfo_all(local_addr_s, NULL, &hints, &la);
if (rc || !la) {
if (rc || !la)
{
if (la)
freeaddrinfo_all(hints.ai_family, la);
return false; /* we don't expect this to happen */
@@ -1453,8 +1462,8 @@ ident_inet(const SockAddr remote_addr,
ident_return = interpret_ident_response(ident_response, ident_user);
if (!ident_return)
ereport(LOG,
(errmsg("invalidly formatted response from Ident server: \"%s\"",
ident_response)));
(errmsg("invalidly formatted response from Ident server: \"%s\"",
ident_response)));

ident_inet_done:
if (sock_fd >= 0)

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/libpq/ip.c,v 1.27 2004/08/29 04:12:32 momjian Exp $
* $PostgreSQL: pgsql/src/backend/libpq/ip.c,v 1.28 2004/08/29 05:06:43 momjian Exp $
*
* This file and the IPV6 implementation were initially provided by
* Nigel Kukard <nkukard@lbsd.net>, Linux Based Systems Design
@@ -34,8 +34,8 @@
#endif
#include <arpa/inet.h>
#include <sys/file.h>

#endif /* !defined(_MSC_VER) && !defined(__BORLANDC__) */
#endif /* !defined(_MSC_VER) &&
* !defined(__BORLANDC__) */

#include "libpq/ip.h"

@@ -67,7 +67,7 @@ static int getnameinfo_unix(const struct sockaddr_un * sa, int salen,
*/
int
getaddrinfo_all(const char *hostname, const char *servname,
const struct addrinfo *hintp, struct addrinfo **result)
const struct addrinfo * hintp, struct addrinfo ** result)
{
/* not all versions of getaddrinfo() zero *result on failure */
*result = NULL;
@@ -269,7 +269,6 @@ getnameinfo_unix(const struct sockaddr_un * sa, int salen,

return 0;
}

#endif /* HAVE_UNIX_SOCKETS */


@@ -328,7 +327,6 @@ rangeSockAddrAF_INET6(const struct sockaddr_in6 * addr,

return 1;
}

#endif

/*
@@ -472,4 +470,4 @@ promote_v4_to_v6_mask(struct sockaddr_storage * addr)
memcpy(addr, &addr6, sizeof(addr6));
}

#endif /* HAVE_IPV6 */
#endif /* HAVE_IPV6 */

@@ -30,7 +30,7 @@
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/libpq/pqcomm.c,v 1.170 2004/08/29 04:12:32 momjian Exp $
* $PostgreSQL: pgsql/src/backend/libpq/pqcomm.c,v 1.171 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -307,7 +307,7 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
{
ereport(LOG,
(errcode_for_socket_access(),
/* translator: %s is IPv4, IPv6, or Unix */
/* translator: %s is IPv4, IPv6, or Unix */
errmsg("could not create %s socket: %m",
familyDesc)));
continue;
@@ -352,7 +352,7 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
{
ereport(LOG,
(errcode_for_socket_access(),
/* translator: %s is IPv4, IPv6, or Unix */
/* translator: %s is IPv4, IPv6, or Unix */
errmsg("could not bind %s socket: %m",
familyDesc),
(IS_AF_UNIX(addr->ai_family)) ?
@@ -392,7 +392,7 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
{
ereport(LOG,
(errcode_for_socket_access(),
/* translator: %s is IPv4, IPv6, or Unix */
/* translator: %s is IPv4, IPv6, or Unix */
errmsg("could not listen on %s socket: %m",
familyDesc)));
closesocket(fd);

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/libpq/pqsignal.c,v 1.36 2004/08/29 04:12:32 momjian Exp $
* $PostgreSQL: pgsql/src/backend/libpq/pqsignal.c,v 1.37 2004/08/29 05:06:43 momjian Exp $
*
* NOTES
* This shouldn't be in libpq, but the monitor and some other
@@ -50,6 +50,7 @@
sigset_t UnBlockSig,
BlockSig,
AuthBlockSig;

#else
int UnBlockSig,
BlockSig,
@@ -169,4 +170,4 @@ pqsignal(int signo, pqsigfunc func)
#endif /* !HAVE_POSIX_SIGNALS */
}

#endif /* WIN32 */
#endif /* WIN32 */

@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/main/main.c,v 1.88 2004/08/29 04:12:32 momjian Exp $
* $PostgreSQL: pgsql/src/backend/main/main.c,v 1.89 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -110,7 +110,7 @@ main(int argc, char *argv[])
if (err != 0)
{
write_stderr("%s: WSAStartup failed: %d\n",
argv[0], err);
argv[0], err);
exit(1);
}

@@ -215,7 +215,7 @@ main(int argc, char *argv[])
write_stderr("\"root\" execution of the PostgreSQL server is not permitted.\n"
"The server must be started under an unprivileged user ID to prevent\n"
"possible system security compromise. See the documentation for\n"
"more information on how to properly start the server.\n");
"more information on how to properly start the server.\n");
exit(1);
}
#endif /* !__BEOS__ */
@@ -235,13 +235,13 @@ main(int argc, char *argv[])
argv[0]);
exit(1);
}
#else /* WIN32 */
#else /* WIN32 */
if (pgwin32_is_admin())
{
write_stderr("execution of PostgreSQL by a user with administrative permissions is not permitted.\n"
"The server must be started under an unprivileged user ID to prevent\n"
"possible system security compromise. See the documentation for\n"
"more information on how to properly start the server.\n");
"more information on how to properly start the server.\n");
exit(1);
}
#endif /* !WIN32 */
@@ -276,8 +276,8 @@ main(int argc, char *argv[])
#endif

/*
* If the first argument is "-boot", then invoke bootstrap mode.
* (This path is taken only for a standalone bootstrap process.)
* If the first argument is "-boot", then invoke bootstrap mode. (This
* path is taken only for a standalone bootstrap process.)
*/
if (argc > 1 && strcmp(argv[1], "-boot") == 0)
exit(BootstrapMain(argc, argv));
@@ -312,11 +312,11 @@ main(int argc, char *argv[])
if (!GetUserName(pw_name_persist, &namesize))
{
write_stderr("%s: could not determine user name (GetUserName failed)\n",
argv[0]);
argv[0]);
exit(1);
}
}
#endif /* WIN32 */
#endif /* WIN32 */

exit(PostgresMain(argc, argv, pw_name_persist));
}

@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.291 2004/08/29 04:12:32 momjian Exp $
* $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.292 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -942,7 +942,7 @@ _copyArrayExpr(ArrayExpr *from)
static RowExpr *
_copyRowExpr(RowExpr *from)
{
RowExpr *newnode = makeNode(RowExpr);
RowExpr *newnode = makeNode(RowExpr);

COPY_NODE_FIELD(args);
COPY_SCALAR_FIELD(row_typeid);
@@ -1402,7 +1402,7 @@ _copyTypeName(TypeName *from)
static SortBy *
_copySortBy(SortBy *from)
{
SortBy *newnode = makeNode(SortBy);
SortBy *newnode = makeNode(SortBy);

COPY_SCALAR_FIELD(sortby_kind);
COPY_NODE_FIELD(useOp);
@@ -2499,9 +2499,9 @@ _copyDeallocateStmt(DeallocateStmt *from)
static List *
_copyList(List *from)
{
List *new;
ListCell *curr_old;
ListCell *prev_new;
List *new;
ListCell *curr_old;
ListCell *prev_new;

Assert(list_length(from) >= 1);

@@ -2779,10 +2779,10 @@ copyObject(void *from)
case T_List:
retval = _copyList(from);
break;

/*
* Lists of integers and OIDs don't need to be
* deep-copied, so we perform a shallow copy via
* list_copy()
* Lists of integers and OIDs don't need to be deep-copied, so
* we perform a shallow copy via list_copy()
*/
case T_IntList:
case T_OidList:

@@ -18,7 +18,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.230 2004/08/29 04:12:32 momjian Exp $
* $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.231 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1680,19 +1680,19 @@ _equalFkConstraint(FkConstraint *a, FkConstraint *b)
static bool
_equalList(List *a, List *b)
{
ListCell *item_a;
ListCell *item_b;
ListCell *item_a;
ListCell *item_b;

/*
* Try to reject by simple scalar checks before grovelling through
* all the list elements...
* Try to reject by simple scalar checks before grovelling through all
* the list elements...
*/
COMPARE_SCALAR_FIELD(type);
COMPARE_SCALAR_FIELD(length);

/*
* We place the switch outside the loop for the sake of
* efficiency; this may not be worth doing...
* We place the switch outside the loop for the sake of efficiency;
* this may not be worth doing...
*/
switch (a->type)
{

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/nodes/list.c,v 1.61 2004/08/29 04:12:32 momjian Exp $
* $PostgreSQL: pgsql/src/backend/nodes/list.c,v 1.62 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,9 +50,10 @@ check_list_invariants(List *list)
Assert(list->head->next == list->tail);
Assert(list->tail->next == NULL);
}

#else
#define check_list_invariants(l)
#endif /* USE_ASSERT_CHECKING */
#endif /* USE_ASSERT_CHECKING */

/*
* Return a freshly allocated List. Since empty non-NIL lists are
@@ -62,8 +63,8 @@ check_list_invariants(List *list)
static List *
new_list(NodeTag type)
{
List *new_list;
ListCell *new_head;
List *new_list;
ListCell *new_head;

new_head = (ListCell *) palloc(sizeof(*new_head));
new_head->next = NULL;
@@ -88,7 +89,7 @@ new_list(NodeTag type)
static void
new_head_cell(List *list)
{
ListCell *new_head;
ListCell *new_head;

new_head = (ListCell *) palloc(sizeof(*new_head));
new_head->next = list->head;
@@ -107,7 +108,7 @@ new_head_cell(List *list)
static void
new_tail_cell(List *list)
{
ListCell *new_tail;
ListCell *new_tail;

new_tail = (ListCell *) palloc(sizeof(*new_tail));
new_tail->next = NULL;
@@ -142,7 +143,7 @@ lappend(List *list, void *datum)
/*
* Append an integer to the specified list. See lappend()
*/
List *
List *
lappend_int(List *list, int datum)
{
Assert(IsIntegerList(list));
@@ -160,7 +161,7 @@ lappend_int(List *list, int datum)
/*
* Append an OID to the specified list. See lappend()
*/
List *
List *
lappend_oid(List *list, Oid datum)
{
Assert(IsOidList(list));
@@ -184,7 +185,7 @@ lappend_oid(List *list, Oid datum)
static ListCell *
add_new_cell(List *list, ListCell *prev_cell)
{
ListCell *new_cell;
ListCell *new_cell;

new_cell = (ListCell *) palloc(sizeof(*new_cell));
/* new_cell->data is left undefined! */
@@ -208,7 +209,7 @@ add_new_cell(List *list, ListCell *prev_cell)
ListCell *
lappend_cell(List *list, ListCell *prev, void *datum)
{
ListCell *new_cell;
ListCell *new_cell;

Assert(IsPointerList(list));

@@ -221,7 +222,7 @@ lappend_cell(List *list, ListCell *prev, void *datum)
ListCell *
lappend_cell_int(List *list, ListCell *prev, int datum)
{
ListCell *new_cell;
ListCell *new_cell;

Assert(IsIntegerList(list));

@@ -234,7 +235,7 @@ lappend_cell_int(List *list, ListCell *prev, int datum)
ListCell *
lappend_cell_oid(List *list, ListCell *prev, Oid datum)
{
ListCell *new_cell;
ListCell *new_cell;

Assert(IsOidList(list));

@@ -291,7 +292,7 @@ lcons_int(int datum, List *list)
/*
* Prepend an OID to the list. See lcons()
*/
List *
List *
lcons_oid(Oid datum, List *list)
{
Assert(IsOidList(list));
@@ -349,18 +350,18 @@ list_concat(List *list1, List *list2)
List *
list_truncate(List *list, int new_size)
{
ListCell *cell;
int n;
ListCell *cell;
int n;

if (new_size <= 0)
return NIL; /* truncate to zero length */
return NIL; /* truncate to zero length */

/* If asked to effectively extend the list, do nothing */
if (new_size >= list_length(list))
return list;

n = 1;
foreach (cell, list)
foreach(cell, list)
{
if (n == new_size)
{
@@ -385,7 +386,7 @@ list_truncate(List *list, int new_size)
static ListCell *
list_nth_cell(List *list, int n)
{
ListCell *match;
ListCell *match;

Assert(list != NIL);
Assert(n >= 0);
@@ -443,12 +444,12 @@ list_nth_oid(List *list, int n)
bool
list_member(List *list, void *datum)
{
ListCell *cell;
ListCell *cell;

Assert(IsPointerList(list));
check_list_invariants(list);

foreach (cell, list)
foreach(cell, list)
{
if (equal(lfirst(cell), datum))
return true;
@@ -464,12 +465,12 @@ list_member(List *list, void *datum)
bool
list_member_ptr(List *list, void *datum)
{
ListCell *cell;
ListCell *cell;

Assert(IsPointerList(list));
check_list_invariants(list);

foreach (cell, list)
foreach(cell, list)
{
if (lfirst(cell) == datum)
return true;
@@ -484,12 +485,12 @@ list_member_ptr(List *list, void *datum)
bool
list_member_int(List *list, int datum)
{
ListCell *cell;
ListCell *cell;

Assert(IsIntegerList(list));
check_list_invariants(list);

foreach (cell, list)
foreach(cell, list)
{
if (lfirst_int(cell) == datum)
return true;
@@ -504,12 +505,12 @@ list_member_int(List *list, int datum)
bool
list_member_oid(List *list, Oid datum)
{
ListCell *cell;
ListCell *cell;

Assert(IsOidList(list));
check_list_invariants(list);

foreach (cell, list)
foreach(cell, list)
{
if (lfirst_oid(cell) == datum)
return true;
@@ -543,8 +544,8 @@ list_delete_cell(List *list, ListCell *cell, ListCell *prev)

/*
* Otherwise, adjust the necessary list links, deallocate the
* particular node we have just removed, and return the list we
* were given.
* particular node we have just removed, and return the list we were
* given.
*/
list->length--;

@@ -567,14 +568,14 @@ list_delete_cell(List *list, ListCell *cell, ListCell *prev)
List *
list_delete(List *list, void *datum)
{
ListCell *cell;
ListCell *prev;
ListCell *cell;
ListCell *prev;

Assert(IsPointerList(list));
check_list_invariants(list);

prev = NULL;
foreach (cell, list)
foreach(cell, list)
{
if (equal(lfirst(cell), datum))
return list_delete_cell(list, cell, prev);
@@ -590,14 +591,14 @@ list_delete(List *list, void *datum)
List *
list_delete_ptr(List *list, void *datum)
{
ListCell *cell;
ListCell *prev;
ListCell *cell;
ListCell *prev;

Assert(IsPointerList(list));
check_list_invariants(list);

prev = NULL;
foreach (cell, list)
foreach(cell, list)
{
if (lfirst(cell) == datum)
return list_delete_cell(list, cell, prev);
@@ -613,14 +614,14 @@ list_delete_ptr(List *list, void *datum)
List *
list_delete_int(List *list, int datum)
{
ListCell *cell;
ListCell *prev;
ListCell *cell;
ListCell *prev;

Assert(IsIntegerList(list));
check_list_invariants(list);

prev = NULL;
foreach (cell, list)
foreach(cell, list)
{
if (lfirst_int(cell) == datum)
return list_delete_cell(list, cell, prev);
@@ -636,14 +637,14 @@ list_delete_int(List *list, int datum)
List *
list_delete_oid(List *list, Oid datum)
{
ListCell *cell;
ListCell *prev;
ListCell *cell;
ListCell *prev;

Assert(IsOidList(list));
check_list_invariants(list);

prev = NULL;
foreach (cell, list)
foreach(cell, list)
{
if (lfirst_oid(cell) == datum)
return list_delete_cell(list, cell, prev);
@@ -693,8 +694,8 @@ list_delete_first(List *list)
List *
list_union(List *list1, List *list2)
{
List *result;
ListCell *cell;
List *result;
ListCell *cell;

Assert(IsPointerList(list1));
Assert(IsPointerList(list2));
@@ -717,8 +718,8 @@ list_union(List *list1, List *list2)
List *
list_union_ptr(List *list1, List *list2)
{
List *result;
ListCell *cell;
List *result;
ListCell *cell;

Assert(IsPointerList(list1));
Assert(IsPointerList(list2));
@@ -740,8 +741,8 @@ list_union_ptr(List *list1, List *list2)
List *
list_union_int(List *list1, List *list2)
{
List *result;
ListCell *cell;
List *result;
ListCell *cell;

Assert(IsIntegerList(list1));
Assert(IsIntegerList(list2));
@@ -763,8 +764,8 @@ list_union_int(List *list1, List *list2)
List *
list_union_oid(List *list1, List *list2)
{
List *result;
ListCell *cell;
List *result;
ListCell *cell;

Assert(IsOidList(list1));
Assert(IsOidList(list2));
@@ -792,8 +793,8 @@ list_union_oid(List *list1, List *list2)
List *
list_difference(List *list1, List *list2)
{
ListCell *cell;
List *result = NIL;
ListCell *cell;
List *result = NIL;

Assert(IsPointerList(list1));
Assert(IsPointerList(list2));
@@ -801,7 +802,7 @@ list_difference(List *list1, List *list2)
if (list2 == NIL)
return list_copy(list1);

foreach (cell, list1)
foreach(cell, list1)
{
if (!list_member(list2, lfirst(cell)))
result = lappend(result, lfirst(cell));
@@ -818,8 +819,8 @@ list_difference(List *list1, List *list2)
List *
list_difference_ptr(List *list1, List *list2)
{
ListCell *cell;
List *result = NIL;
ListCell *cell;
List *result = NIL;

Assert(IsPointerList(list1));
Assert(IsPointerList(list2));
@@ -827,7 +828,7 @@ list_difference_ptr(List *list1, List *list2)
if (list2 == NIL)
return list_copy(list1);

foreach (cell, list1)
foreach(cell, list1)
{
if (!list_member_ptr(list2, lfirst(cell)))
result = lappend(result, lfirst(cell));
@@ -843,8 +844,8 @@ list_difference_ptr(List *list1, List *list2)
List *
list_difference_int(List *list1, List *list2)
{
ListCell *cell;
List *result = NIL;
ListCell *cell;
List *result = NIL;

Assert(IsIntegerList(list1));
Assert(IsIntegerList(list2));
@@ -852,7 +853,7 @@ list_difference_int(List *list1, List *list2)
if (list2 == NIL)
return list_copy(list1);

foreach (cell, list1)
foreach(cell, list1)
{
if (!list_member_int(list2, lfirst_int(cell)))
result = lappend_int(result, lfirst_int(cell));
@@ -868,8 +869,8 @@ list_difference_int(List *list1, List *list2)
List *
list_difference_oid(List *list1, List *list2)
{
ListCell *cell;
List *result = NIL;
ListCell *cell;
List *result = NIL;

Assert(IsOidList(list1));
Assert(IsOidList(list2));
@@ -877,7 +878,7 @@ list_difference_oid(List *list1, List *list2)
if (list2 == NIL)
return list_copy(list1);

foreach (cell, list1)
foreach(cell, list1)
{
if (!list_member_oid(list2, lfirst_oid(cell)))
result = lappend_oid(result, lfirst_oid(cell));
@@ -891,14 +892,14 @@ list_difference_oid(List *list1, List *list2)
static void
list_free_private(List *list, bool deep)
{
ListCell *cell;
ListCell *cell;

check_list_invariants(list);

cell = list_head(list);
while (cell != NULL)
{
ListCell *tmp = cell;
ListCell *tmp = cell;

cell = lnext(cell);
if (deep)
@@ -948,9 +949,9 @@ list_free_deep(List *list)
List *
list_copy(List *oldlist)
{
List *newlist;
ListCell *newlist_prev;
ListCell *oldlist_cur;
List *newlist;
ListCell *newlist_prev;
ListCell *oldlist_cur;

if (oldlist == NIL)
return NIL;
@@ -968,7 +969,7 @@ list_copy(List *oldlist)
oldlist_cur = oldlist->head->next;
while (oldlist_cur)
{
ListCell *newlist_cur;
ListCell *newlist_cur;

newlist_cur = (ListCell *) palloc(sizeof(*newlist_cur));
newlist_cur->data = oldlist_cur->data;
@@ -991,9 +992,9 @@ list_copy(List *oldlist)
List *
list_copy_tail(List *oldlist, int nskip)
{
List *newlist;
ListCell *newlist_prev;
ListCell *oldlist_cur;
List *newlist;
ListCell *newlist_prev;
ListCell *oldlist_cur;

if (nskip < 0)
nskip = 0; /* would it be better to elog? */
@@ -1012,8 +1013,8 @@ list_copy_tail(List *oldlist, int nskip)
oldlist_cur = oldlist_cur->next;

/*
* Copy over the data in the first remaining cell; new_list() has already
* allocated the head cell itself
* Copy over the data in the first remaining cell; new_list() has
* already allocated the head cell itself
*/
newlist->head->data = oldlist_cur->data;

@@ -1021,7 +1022,7 @@ list_copy_tail(List *oldlist, int nskip)
oldlist_cur = oldlist_cur->next;
while (oldlist_cur)
{
ListCell *newlist_cur;
ListCell *newlist_cur;

newlist_cur = (ListCell *) palloc(sizeof(*newlist_cur));
newlist_cur->data = oldlist_cur->data;
@@ -1063,8 +1064,7 @@ list_length(List *l)
{
return l ? l->length : 0;
}

#endif /* ! __GNUC__ */
#endif /* ! __GNUC__ */

/*
* Temporary compatibility functions
@@ -1082,7 +1082,7 @@ list_length(List *l)
* list_length() macro in order to avoid the overhead of a function
* call.
*/
int length(List *list);
int length(List *list);

int
length(List *list)

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/nodes/outfuncs.c,v 1.242 2004/08/29 04:12:33 momjian Exp $
* $PostgreSQL: pgsql/src/backend/nodes/outfuncs.c,v 1.243 2004/08/29 05:06:43 momjian Exp $
*
* NOTES
* Every node type that can appear in stored rules' parsetrees *must*
@@ -138,7 +138,7 @@ _outToken(StringInfo str, char *s)
static void
_outList(StringInfo str, List *node)
{
ListCell *lc;
ListCell *lc;

appendStringInfoChar(str, '(');

@@ -147,12 +147,12 @@ _outList(StringInfo str, List *node)
else if (IsA(node, OidList))
appendStringInfoChar(str, 'o');

foreach (lc, node)
foreach(lc, node)
{
/*
* For the sake of backward compatibility, we emit a slightly
* different whitespace format for lists of nodes vs. other
* types of lists. XXX: is this necessary?
* different whitespace format for lists of nodes vs. other types
* of lists. XXX: is this necessary?
*/
if (IsA(node, List))
{
@@ -165,8 +165,8 @@ _outList(StringInfo str, List *node)
else if (IsA(node, OidList))
appendStringInfo(str, " %u", lfirst_oid(lc));
else
elog(ERROR, "unrecognized list node type: %d",
(int) node->type);
elog(ERROR, "unrecognized list node type: %d",
(int) node->type);
}

appendStringInfoChar(str, ')');
@@ -1450,6 +1450,7 @@ _outValue(StringInfo str, Value *value)
appendStringInfo(str, "%ld", value->val.ival);
break;
case T_Float:

/*
* We assume the value is a valid numeric literal and so does
* not need quoting.
@@ -1595,7 +1596,7 @@ _outNode(StringInfo str, void *obj)
{
if (obj == NULL)
appendStringInfo(str, "<>");
else if (IsA(obj, List) || IsA(obj, IntList) || IsA(obj, OidList))
else if (IsA(obj, List) ||IsA(obj, IntList) || IsA(obj, OidList))
_outList(str, obj);
else if (IsA(obj, Integer) ||
IsA(obj, Float) ||

@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/nodes/params.c,v 1.2 2004/08/29 04:12:33 momjian Exp $
* $PostgreSQL: pgsql/src/backend/nodes/params.c,v 1.3 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -28,7 +28,8 @@ ParamListInfo
copyParamList(ParamListInfo from)
{
ParamListInfo retval;
int i, size;
int i,
size;

if (from == NULL)
return NULL;
@@ -39,7 +40,8 @@ copyParamList(ParamListInfo from)

retval = (ParamListInfo) palloc0((size + 1) * sizeof(ParamListInfoData));

for (i = 0; i < size; i++) {
for (i = 0; i < size; i++)
{
/* copy metadata */
retval[i].kind = from[i].kind;
if (from[i].kind == PARAM_NAMED)
@@ -51,12 +53,12 @@ copyParamList(ParamListInfo from)
retval[i].isnull = from[i].isnull;
if (from[i].isnull)
{
retval[i].value = from[i].value; /* nulls just copy */
retval[i].value = from[i].value; /* nulls just copy */
}
else
{
int16 typLen;
bool typByVal;
int16 typLen;
bool typByVal;

get_typlenbyval(from[i].ptype, &typLen, &typByVal);
retval[i].value = datumCopy(from[i].value, typByVal, typLen);