1
0
mirror of https://github.com/postgres/postgres.git synced 2025-07-02 09:02:37 +03:00

Standard pgindent run for 8.1.

This commit is contained in:
Bruce Momjian
2005-10-15 02:49:52 +00:00
parent 790c01d280
commit 1dc3498251
770 changed files with 34334 additions and 32507 deletions

View File

@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.84 2005/05/15 21:19:54 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.85 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -251,10 +251,10 @@ ExecMarkPos(PlanState *node)
*
* NOTE: the semantics of this are that the first ExecProcNode following
* the restore operation will yield the same tuple as the first one following
* the mark operation. It is unspecified what happens to the plan node's
* the mark operation. It is unspecified what happens to the plan node's
* result TupleTableSlot. (In most cases the result slot is unchanged by
* a restore, but the node may choose to clear it or to load it with the
* restored-to tuple.) Hence the caller should discard any previously
* restored-to tuple.) Hence the caller should discard any previously
* returned TupleTableSlot after doing a restore.
*/
void
@ -398,15 +398,14 @@ ExecMayReturnRawTuples(PlanState *node)
{
/*
* At a table scan node, we check whether ExecAssignScanProjectionInfo
* decided to do projection or not. Most non-scan nodes always
* project and so we can return "false" immediately. For nodes that
* don't project but just pass up input tuples, we have to recursively
* examine the input plan node.
* decided to do projection or not. Most non-scan nodes always project
* and so we can return "false" immediately. For nodes that don't project
* but just pass up input tuples, we have to recursively examine the input
* plan node.
*
* Note: Hash and Material are listed here because they sometimes return
* an original input tuple, not a copy. But Sort and SetOp never
* return an original tuple, so they can be treated like projecting
* nodes.
* Note: Hash and Material are listed here because they sometimes return an
* original input tuple, not a copy. But Sort and SetOp never return an
* original tuple, so they can be treated like projecting nodes.
*/
switch (nodeTag(node))
{

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.15 2005/05/29 04:23:03 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.16 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -66,11 +66,10 @@ execTuplesMatch(TupleTableSlot *slot1,
oldContext = MemoryContextSwitchTo(evalContext);
/*
* We cannot report a match without checking all the fields, but we
* can report a non-match as soon as we find unequal fields. So,
* start comparing at the last field (least significant sort key).
* That's the most likely to be different if we are dealing with
* sorted input.
* We cannot report a match without checking all the fields, but we can
* report a non-match as soon as we find unequal fields. So, start
* comparing at the last field (least significant sort key). That's the
* most likely to be different if we are dealing with sorted input.
*/
result = true;
@ -137,11 +136,10 @@ execTuplesUnequal(TupleTableSlot *slot1,
oldContext = MemoryContextSwitchTo(evalContext);
/*
* We cannot report a match without checking all the fields, but we
* can report a non-match as soon as we find unequal fields. So,
* start comparing at the last field (least significant sort key).
* That's the most likely to be different if we are dealing with
* sorted input.
* We cannot report a match without checking all the fields, but we can
* report a non-match as soon as we find unequal fields. So, start
* comparing at the last field (least significant sort key). That's the
* most likely to be different if we are dealing with sorted input.
*/
result = false;
@ -288,7 +286,7 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx,
Assert(entrysize >= sizeof(TupleHashEntryData));
hashtable = (TupleHashTable) MemoryContextAlloc(tablecxt,
sizeof(TupleHashTableData));
sizeof(TupleHashTableData));
hashtable->numCols = numCols;
hashtable->keyColIdx = keyColIdx;
@ -297,7 +295,7 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx,
hashtable->tablecxt = tablecxt;
hashtable->tempcxt = tempcxt;
hashtable->entrysize = entrysize;
hashtable->tableslot = NULL; /* will be made on first lookup */
hashtable->tableslot = NULL; /* will be made on first lookup */
hashtable->inputslot = NULL;
MemSet(&hash_ctl, 0, sizeof(hash_ctl));
@ -308,7 +306,7 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx,
hash_ctl.hcxt = tablecxt;
hashtable->hashtab = hash_create("TupleHashTable", (long) nbuckets,
&hash_ctl,
HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
return hashtable;
}
@ -341,6 +339,7 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
TupleDesc tupdesc;
oldContext = MemoryContextSwitchTo(hashtable->tablecxt);
/*
* We copy the input tuple descriptor just for safety --- we assume
* all input tuples will have equivalent descriptors.
@ -382,9 +381,9 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
/*
* created new entry
*
* Zero any caller-requested space in the entry. (This zaps
* the "key data" dynahash.c copied into the new entry, but we
* don't care since we're about to overwrite it anyway.)
* Zero any caller-requested space in the entry. (This zaps the "key
* data" dynahash.c copied into the new entry, but we don't care
* since we're about to overwrite it anyway.)
*/
MemSet(entry, 0, hashtable->entrysize);
@ -482,6 +481,7 @@ static int
TupleHashTableMatch(const void *key1, const void *key2, Size keysize)
{
HeapTuple tuple1 = ((const TupleHashEntryData *) key1)->firstTuple;
#ifdef USE_ASSERT_CHECKING
HeapTuple tuple2 = ((const TupleHashEntryData *) key2)->firstTuple;
#endif

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.49 2005/04/06 16:34:04 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.50 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -55,7 +55,7 @@
*
* Initialize the Junk filter.
*
* The source targetlist is passed in. The output tuple descriptor is
* The source targetlist is passed in. The output tuple descriptor is
* built from the non-junk tlist entries, plus the passed specification
* of whether to include room for an OID or not.
* An optional resultSlot can be passed as well.
@ -87,11 +87,11 @@ ExecInitJunkFilter(List *targetList, bool hasoid, TupleTableSlot *slot)
* Now calculate the mapping between the original tuple's attributes and
* the "clean" tuple's attributes.
*
* The "map" is an array of "cleanLength" attribute numbers, i.e. one
* entry for every attribute of the "clean" tuple. The value of this
* entry is the attribute number of the corresponding attribute of the
* "original" tuple. (Zero indicates a NULL output attribute, but we
* do not use that feature in this routine.)
* The "map" is an array of "cleanLength" attribute numbers, i.e. one entry
* for every attribute of the "clean" tuple. The value of this entry is
* the attribute number of the corresponding attribute of the "original"
* tuple. (Zero indicates a NULL output attribute, but we do not use that
* feature in this routine.)
*/
cleanLength = cleanTupType->natts;
if (cleanLength > 0)
@ -155,14 +155,14 @@ ExecInitJunkFilterConversion(List *targetList,
slot = MakeSingleTupleTableSlot(cleanTupType);
/*
* Calculate the mapping between the original tuple's attributes and
* the "clean" tuple's attributes.
* Calculate the mapping between the original tuple's attributes and the
* "clean" tuple's attributes.
*
* The "map" is an array of "cleanLength" attribute numbers, i.e. one
* entry for every attribute of the "clean" tuple. The value of this
* entry is the attribute number of the corresponding attribute of the
* "original" tuple. We store zero for any deleted attributes, marking
* that a NULL is needed in the output tuple.
* The "map" is an array of "cleanLength" attribute numbers, i.e. one entry
* for every attribute of the "clean" tuple. The value of this entry is
* the attribute number of the corresponding attribute of the "original"
* tuple. We store zero for any deleted attributes, marking that a NULL
* is needed in the output tuple.
*/
cleanLength = cleanTupType->natts;
if (cleanLength > 0)
@ -220,8 +220,8 @@ ExecGetJunkAttribute(JunkFilter *junkfilter,
ListCell *t;
/*
* Look in the junkfilter's target list for an attribute with
* the given name
* Look in the junkfilter's target list for an attribute with the given
* name
*/
foreach(t, junkfilter->jf_targetList)
{

View File

@ -26,7 +26,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.255 2005/08/26 03:07:25 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.256 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -208,8 +208,7 @@ ExecutorRun(QueryDesc *queryDesc,
oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
/*
* extract information from the query descriptor and the query
* feature.
* extract information from the query descriptor and the query feature.
*/
operation = queryDesc->operation;
dest = queryDesc->dest;
@ -352,15 +351,15 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
{
AclMode requiredPerms;
Oid relOid;
Oid userid;
Oid userid;
/*
* Only plain-relation RTEs need to be checked here. Subquery RTEs
* are checked by ExecInitSubqueryScan if the subquery is still a
* separate subquery --- if it's been pulled up into our query level
* then the RTEs are in our rangetable and will be checked here.
* Function RTEs are checked by init_fcache when the function is
* prepared for execution. Join and special RTEs need no checks.
* Only plain-relation RTEs need to be checked here. Subquery RTEs are
* checked by ExecInitSubqueryScan if the subquery is still a separate
* subquery --- if it's been pulled up into our query level then the RTEs
* are in our rangetable and will be checked here. Function RTEs are
* checked by init_fcache when the function is prepared for execution.
* Join and special RTEs need no checks.
*/
if (rte->rtekind != RTE_RELATION)
return;
@ -375,19 +374,17 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
relOid = rte->relid;
/*
* userid to check as: current user unless we have a setuid
* indication.
* userid to check as: current user unless we have a setuid indication.
*
* Note: GetUserId() is presently fast enough that there's no harm in
* calling it separately for each RTE. If that stops being true, we
* could call it once in ExecCheckRTPerms and pass the userid down
* from there. But for now, no need for the extra clutter.
* Note: GetUserId() is presently fast enough that there's no harm in calling
* it separately for each RTE. If that stops being true, we could call it
* once in ExecCheckRTPerms and pass the userid down from there. But for
* now, no need for the extra clutter.
*/
userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
/*
* We must have *all* the requiredPerms bits, so use aclmask not
* aclcheck.
* We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
*/
if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
!= requiredPerms)
@ -515,8 +512,7 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
else
{
/*
* Single result relation identified by
* parseTree->resultRelation
* Single result relation identified by parseTree->resultRelation
*/
numResultRelations = 1;
resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo));
@ -544,8 +540,8 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
/*
* Detect whether we're doing SELECT INTO. If so, set the es_into_oids
* flag appropriately so that the plan tree will be initialized with
* the correct tuple descriptors.
* flag appropriately so that the plan tree will be initialized with the
* correct tuple descriptors.
*/
do_select_into = false;
@ -583,10 +579,10 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
}
/*
* initialize the executor "tuple" table. We need slots for all the
* plan nodes, plus possibly output slots for the junkfilter(s). At
* this point we aren't sure if we need junkfilters, so just add slots
* for them unconditionally.
* initialize the executor "tuple" table. We need slots for all the plan
* nodes, plus possibly output slots for the junkfilter(s). At this point
* we aren't sure if we need junkfilters, so just add slots for them
* unconditionally.
*/
{
int nSlots = ExecCountSlotsNode(plan);
@ -606,26 +602,26 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
estate->es_useEvalPlan = false;
/*
* initialize the private state information for all the nodes in the
* query tree. This opens files, allocates storage and leaves us
* ready to start processing tuples.
* initialize the private state information for all the nodes in the query
* tree. This opens files, allocates storage and leaves us ready to start
* processing tuples.
*/
planstate = ExecInitNode(plan, estate);
/*
* Get the tuple descriptor describing the type of tuples to return.
* (this is especially important if we are creating a relation with
* "SELECT INTO")
* Get the tuple descriptor describing the type of tuples to return. (this
* is especially important if we are creating a relation with "SELECT
* INTO")
*/
tupType = ExecGetResultType(planstate);
/*
* Initialize the junk filter if needed. SELECT and INSERT queries
* need a filter if there are any junk attrs in the tlist. INSERT and
* SELECT INTO also need a filter if the plan may return raw disk
* tuples (else heap_insert will be scribbling on the source
* relation!). UPDATE and DELETE always need a filter, since there's
* always a junk 'ctid' attribute present --- no need to look first.
* Initialize the junk filter if needed. SELECT and INSERT queries need a
* filter if there are any junk attrs in the tlist. INSERT and SELECT
* INTO also need a filter if the plan may return raw disk tuples (else
* heap_insert will be scribbling on the source relation!). UPDATE and
* DELETE always need a filter, since there's always a junk 'ctid'
* attribute present --- no need to look first.
*/
{
bool junk_filter_needed = false;
@ -661,10 +657,9 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
if (junk_filter_needed)
{
/*
* If there are multiple result relations, each one needs its
* own junk filter. Note this is only possible for
* UPDATE/DELETE, so we can't be fooled by some needing a
* filter and some not.
* If there are multiple result relations, each one needs its own
* junk filter. Note this is only possible for UPDATE/DELETE, so
* we can't be fooled by some needing a filter and some not.
*/
if (parseTree->resultRelations != NIL)
{
@ -687,15 +682,15 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
JunkFilter *j;
j = ExecInitJunkFilter(subplan->plan->targetlist,
resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
ExecAllocTableSlot(estate->es_tupleTable));
resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
ExecAllocTableSlot(estate->es_tupleTable));
resultRelInfo->ri_junkFilter = j;
resultRelInfo++;
}
/*
* Set active junkfilter too; at this point ExecInitAppend
* has already selected an active result relation...
* Set active junkfilter too; at this point ExecInitAppend has
* already selected an active result relation...
*/
estate->es_junkFilter =
estate->es_result_relation_info->ri_junkFilter;
@ -707,7 +702,7 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
j = ExecInitJunkFilter(planstate->plan->targetlist,
tupType->tdhasoid,
ExecAllocTableSlot(estate->es_tupleTable));
ExecAllocTableSlot(estate->es_tupleTable));
estate->es_junkFilter = j;
if (estate->es_result_relation_info)
estate->es_result_relation_info->ri_junkFilter = j;
@ -777,10 +772,9 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
CommandCounterIncrement();
/*
* If necessary, create a TOAST table for the into relation. Note
* that AlterTableCreateToastTable ends with
* CommandCounterIncrement(), so that the TOAST table will be
* visible for insertion.
* If necessary, create a TOAST table for the into relation. Note that
* AlterTableCreateToastTable ends with CommandCounterIncrement(), so
* that the TOAST table will be visible for insertion.
*/
AlterTableCreateToastTable(intoRelationId, true);
@ -795,11 +789,11 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
/*
* We can skip WAL-logging the insertions, unless PITR is in use.
*
* Note that for a non-temp INTO table, this is safe only because
* we know that the catalog changes above will have been WAL-logged,
* and so RecordTransactionCommit will think it needs to WAL-log the
* eventual transaction commit. Else the commit might be lost, even
* though all the data is safely fsync'd ...
* Note that for a non-temp INTO table, this is safe only because we know
* that the catalog changes above will have been WAL-logged, and so
* RecordTransactionCommit will think it needs to WAL-log the eventual
* transaction commit. Else the commit might be lost, even though all
* the data is safely fsync'd ...
*/
estate->es_into_relation_use_wal = XLogArchivingActive();
}
@ -832,19 +826,19 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change sequence \"%s\"",
RelationGetRelationName(resultRelationDesc))));
RelationGetRelationName(resultRelationDesc))));
break;
case RELKIND_TOASTVALUE:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change TOAST relation \"%s\"",
RelationGetRelationName(resultRelationDesc))));
RelationGetRelationName(resultRelationDesc))));
break;
case RELKIND_VIEW:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change view \"%s\"",
RelationGetRelationName(resultRelationDesc))));
RelationGetRelationName(resultRelationDesc))));
break;
}
@ -859,7 +853,7 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
if (resultRelInfo->ri_TrigDesc)
{
int n = resultRelInfo->ri_TrigDesc->numtriggers;
int n = resultRelInfo->ri_TrigDesc->numtriggers;
resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
palloc0(n * sizeof(FmgrInfo));
@ -878,9 +872,9 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
/*
* If there are indices on the result relation, open them and save
* descriptors in the result relation info, so that we can add new
* index entries for the tuples we add/update. We need not do this
* for a DELETE, however, since deletion doesn't affect indexes.
* descriptors in the result relation info, so that we can add new index
* entries for the tuples we add/update. We need not do this for a
* DELETE, however, since deletion doesn't affect indexes.
*/
if (resultRelationDesc->rd_rel->relhasindex &&
operation != CMD_DELETE)
@ -981,8 +975,7 @@ ExecEndPlan(PlanState *planstate, EState *estate)
estate->es_tupleTable = NULL;
/*
* close the result relation(s) if any, but hold locks until xact
* commit.
* close the result relation(s) if any, but hold locks until xact commit.
*/
resultRelInfo = estate->es_result_relations;
for (i = estate->es_num_result_relations; i > 0; i--)
@ -999,10 +992,10 @@ ExecEndPlan(PlanState *planstate, EState *estate)
if (estate->es_into_relation_descriptor != NULL)
{
/*
* If we skipped using WAL, and it's not a temp relation,
* we must force the relation down to disk before it's
* safe to commit the transaction. This requires forcing
* out any dirty buffers and then doing a forced fsync.
* If we skipped using WAL, and it's not a temp relation, we must
* force the relation down to disk before it's safe to commit the
* transaction. This requires forcing out any dirty buffers and then
* doing a forced fsync.
*/
if (!estate->es_into_relation_use_wal &&
!estate->es_into_relation_descriptor->rd_istemp)
@ -1087,8 +1080,7 @@ ExecutePlan(EState *estate,
}
/*
* Loop until we've processed the proper number of tuples from the
* plan.
* Loop until we've processed the proper number of tuples from the plan.
*/
for (;;)
@ -1120,12 +1112,12 @@ lnext: ;
}
/*
* if we have a junk filter, then project a new tuple with the
* junk removed.
* if we have a junk filter, then project a new tuple with the junk
* removed.
*
* Store this new "clean" tuple in the junkfilter's resultSlot.
* (Formerly, we stored it back over the "dirty" tuple, which is
* WRONG because that tuple slot has the wrong descriptor.)
* (Formerly, we stored it back over the "dirty" tuple, which is WRONG
* because that tuple slot has the wrong descriptor.)
*
* Also, extract all the junk information we need.
*/
@ -1151,10 +1143,10 @@ lnext: ;
elog(ERROR, "ctid is NULL");
tupleid = (ItemPointer) DatumGetPointer(datum);
tuple_ctid = *tupleid; /* make sure we don't free the
* ctid!! */
tuple_ctid = *tupleid; /* make sure we don't free the ctid!! */
tupleid = &tuple_ctid;
}
/*
* Process any FOR UPDATE or FOR SHARE locking requested.
*/
@ -1171,8 +1163,8 @@ lnext: ;
ItemPointerData update_ctid;
TransactionId update_xmax;
TupleTableSlot *newSlot;
LockTupleMode lockmode;
HTSU_Result test;
LockTupleMode lockmode;
HTSU_Result test;
if (!ExecGetJunkAttribute(junkfilter,
slot,
@ -1210,8 +1202,8 @@ lnext: ;
case HeapTupleUpdated:
if (IsXactIsoLevelSerializable)
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("could not serialize access due to concurrent update")));
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("could not serialize access due to concurrent update")));
if (!ItemPointerEquals(&update_ctid,
&tuple.t_self))
{
@ -1230,8 +1222,7 @@ lnext: ;
/*
* if tuple was deleted or PlanQual failed for
* updated tuple - we must not return this
* tuple!
* updated tuple - we must not return this tuple!
*/
goto lnext;
@ -1251,9 +1242,9 @@ lnext: ;
}
/*
* now that we have a tuple, do the appropriate thing with it..
* either return it to the user, add it to a relation someplace,
* delete it from a relation, or modify some of its attributes.
* now that we have a tuple, do the appropriate thing with it.. either
* return it to the user, add it to a relation someplace, delete it
* from a relation, or modify some of its attributes.
*/
switch (operation)
{
@ -1287,9 +1278,9 @@ lnext: ;
}
/*
* check our tuple count.. if we've processed the proper number
* then quit, else loop again and process more tuples. Zero
* numberTuples means no limit.
* check our tuple count.. if we've processed the proper number then
* quit, else loop again and process more tuples. Zero numberTuples
* means no limit.
*/
current_tuple_count++;
if (numberTuples && numberTuples == current_tuple_count)
@ -1383,8 +1374,8 @@ ExecInsert(TupleTableSlot *slot,
Oid newId;
/*
* get the heap tuple out of the tuple table slot, making sure
* we have a writable copy
* get the heap tuple out of the tuple table slot, making sure we have a
* writable copy
*/
tuple = ExecMaterializeSlot(slot);
@ -1396,7 +1387,7 @@ ExecInsert(TupleTableSlot *slot,
/* BEFORE ROW INSERT Triggers */
if (resultRelInfo->ri_TrigDesc &&
resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
{
HeapTuple newtuple;
@ -1409,9 +1400,9 @@ ExecInsert(TupleTableSlot *slot,
{
/*
* Insert modified tuple into tuple table slot, replacing the
* original. We assume that it was allocated in per-tuple
* memory context, and therefore will go away by itself. The
* tuple table slot should not try to clear it.
* original. We assume that it was allocated in per-tuple memory
* context, and therefore will go away by itself. The tuple table
* slot should not try to clear it.
*/
ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
tuple = newtuple;
@ -1427,8 +1418,8 @@ ExecInsert(TupleTableSlot *slot,
/*
* insert the tuple
*
* Note: heap_insert returns the tid (location) of the new tuple
* in the t_self field.
* Note: heap_insert returns the tid (location) of the new tuple in the
* t_self field.
*/
newId = heap_insert(resultRelationDesc, tuple,
estate->es_snapshot->curcid,
@ -1463,7 +1454,7 @@ ExecDelete(TupleTableSlot *slot,
{
ResultRelInfo *resultRelInfo;
Relation resultRelationDesc;
HTSU_Result result;
HTSU_Result result;
ItemPointerData update_ctid;
TransactionId update_xmax;
@ -1475,7 +1466,7 @@ ExecDelete(TupleTableSlot *slot,
/* BEFORE ROW DELETE Triggers */
if (resultRelInfo->ri_TrigDesc &&
resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
{
bool dodelete;
@ -1489,9 +1480,9 @@ ExecDelete(TupleTableSlot *slot,
/*
* delete the tuple
*
* Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
* the row to be deleted is visible to that snapshot, and throw a can't-
* serialize error if not. This is a special-case behavior needed for
* Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that the
* row to be deleted is visible to that snapshot, and throw a can't-
* serialize error if not. This is a special-case behavior needed for
* referential integrity updates in serializable transactions.
*/
ldelete:;
@ -1543,9 +1534,9 @@ ldelete:;
* Note: Normally one would think that we have to delete index tuples
* associated with the heap tuple now..
*
* ... but in POSTGRES, we have no need to do this because the vacuum
* daemon automatically opens an index scan and deletes index tuples
* when it finds deleted heap tuples. -cim 9/27/89
* ... but in POSTGRES, we have no need to do this because the vacuum daemon
* automatically opens an index scan and deletes index tuples when it
* finds deleted heap tuples. -cim 9/27/89
*/
/* AFTER ROW DELETE Triggers */
@ -1571,7 +1562,7 @@ ExecUpdate(TupleTableSlot *slot,
HeapTuple tuple;
ResultRelInfo *resultRelInfo;
Relation resultRelationDesc;
HTSU_Result result;
HTSU_Result result;
ItemPointerData update_ctid;
TransactionId update_xmax;
@ -1582,8 +1573,8 @@ ExecUpdate(TupleTableSlot *slot,
elog(ERROR, "cannot UPDATE during bootstrap");
/*
* get the heap tuple out of the tuple table slot, making sure
* we have a writable copy
* get the heap tuple out of the tuple table slot, making sure we have a
* writable copy
*/
tuple = ExecMaterializeSlot(slot);
@ -1595,7 +1586,7 @@ ExecUpdate(TupleTableSlot *slot,
/* BEFORE ROW UPDATE Triggers */
if (resultRelInfo->ri_TrigDesc &&
resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
{
HeapTuple newtuple;
@ -1610,9 +1601,9 @@ ExecUpdate(TupleTableSlot *slot,
{
/*
* Insert modified tuple into tuple table slot, replacing the
* original. We assume that it was allocated in per-tuple
* memory context, and therefore will go away by itself. The
* tuple table slot should not try to clear it.
* original. We assume that it was allocated in per-tuple memory
* context, and therefore will go away by itself. The tuple table
* slot should not try to clear it.
*/
ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
tuple = newtuple;
@ -1622,11 +1613,11 @@ ExecUpdate(TupleTableSlot *slot,
/*
* Check the constraints of the tuple
*
* If we generate a new candidate tuple after EvalPlanQual testing, we
* must loop back here and recheck constraints. (We don't need to
* redo triggers, however. If there are any BEFORE triggers then
* trigger.c will have done heap_lock_tuple to lock the correct tuple,
* so there's no need to do them again.)
* If we generate a new candidate tuple after EvalPlanQual testing, we must
* loop back here and recheck constraints. (We don't need to redo
* triggers, however. If there are any BEFORE triggers then trigger.c
* will have done heap_lock_tuple to lock the correct tuple, so there's no
* need to do them again.)
*/
lreplace:;
if (resultRelationDesc->rd_att->constr)
@ -1635,9 +1626,9 @@ lreplace:;
/*
* replace the heap tuple
*
* Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
* the row to be updated is visible to that snapshot, and throw a can't-
* serialize error if not. This is a special-case behavior needed for
* Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that the
* row to be updated is visible to that snapshot, and throw a can't-
* serialize error if not. This is a special-case behavior needed for
* referential integrity updates in serializable transactions.
*/
result = heap_update(resultRelationDesc, tupleid, tuple,
@ -1687,18 +1678,18 @@ lreplace:;
(estate->es_processed)++;
/*
* Note: instead of having to update the old index tuples associated
* with the heap tuple, all we do is form and insert new index tuples.
* This is because UPDATEs are actually DELETEs and INSERTs, and index
* tuple deletion is done automagically by the vacuum daemon. All we
* do is insert new index tuples. -cim 9/27/89
* Note: instead of having to update the old index tuples associated with
* the heap tuple, all we do is form and insert new index tuples. This is
* because UPDATEs are actually DELETEs and INSERTs, and index tuple
* deletion is done automagically by the vacuum daemon. All we do is
* insert new index tuples. -cim 9/27/89
*/
/*
* insert index entries for tuple
*
* Note: heap_update returns the tid (location) of the new tuple
* in the t_self field.
* Note: heap_update returns the tid (location) of the new tuple in the
* t_self field.
*/
if (resultRelInfo->ri_NumIndices > 0)
ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
@ -1721,8 +1712,8 @@ ExecRelCheck(ResultRelInfo *resultRelInfo,
/*
* If first time through for this result relation, build expression
* nodetrees for rel's constraint expressions. Keep them in the
* per-query memory context so they'll survive throughout the query.
* nodetrees for rel's constraint expressions. Keep them in the per-query
* memory context so they'll survive throughout the query.
*/
if (resultRelInfo->ri_ConstraintExprs == NULL)
{
@ -1740,8 +1731,8 @@ ExecRelCheck(ResultRelInfo *resultRelInfo,
}
/*
* We will use the EState's per-tuple context for evaluating
* constraint expressions (creating it if it's not already there).
* We will use the EState's per-tuple context for evaluating constraint
* expressions (creating it if it's not already there).
*/
econtext = GetPerTupleExprContext(estate);
@ -1787,7 +1778,7 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("null value in column \"%s\" violates not-null constraint",
NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
}
}
@ -1870,9 +1861,9 @@ EvalPlanQual(EState *estate, Index rti,
{
/*
* If xmin isn't what we're expecting, the slot must have been
* recycled and reused for an unrelated tuple. This implies
* that the latest version of the row was deleted, so we need
* do nothing. (Should be safe to examine xmin without getting
* recycled and reused for an unrelated tuple. This implies that
* the latest version of the row was deleted, so we need do
* nothing. (Should be safe to examine xmin without getting
* buffer's content lock, since xmin never changes in an existing
* tuple.)
*/
@ -1888,8 +1879,8 @@ EvalPlanQual(EState *estate, Index rti,
elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
/*
* If tuple is being updated by other transaction then we have
* to wait for its commit/abort.
* If tuple is being updated by other transaction then we have to
* wait for its commit/abort.
*/
if (TransactionIdIsValid(SnapshotDirty->xmax))
{
@ -1907,8 +1898,8 @@ EvalPlanQual(EState *estate, Index rti,
}
/*
* If the referenced slot was actually empty, the latest version
* of the row must have been deleted, so we need do nothing.
* If the referenced slot was actually empty, the latest version of
* the row must have been deleted, so we need do nothing.
*/
if (tuple.t_data == NULL)
{
@ -1928,15 +1919,15 @@ EvalPlanQual(EState *estate, Index rti,
/*
* If we get here, the tuple was found but failed SnapshotDirty.
* Assuming the xmin is either a committed xact or our own xact
* (as it certainly should be if we're trying to modify the tuple),
* this must mean that the row was updated or deleted by either
* a committed xact or our own xact. If it was deleted, we can
* ignore it; if it was updated then chain up to the next version
* and repeat the whole test.
* Assuming the xmin is either a committed xact or our own xact (as it
* certainly should be if we're trying to modify the tuple), this must
* mean that the row was updated or deleted by either a committed xact
* or our own xact. If it was deleted, we can ignore it; if it was
* updated then chain up to the next version and repeat the whole
* test.
*
* As above, it should be safe to examine xmax and t_ctid without
* the buffer content lock, because they can't be changing.
* As above, it should be safe to examine xmax and t_ctid without the
* buffer content lock, because they can't be changing.
*/
if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
{
@ -1954,8 +1945,8 @@ EvalPlanQual(EState *estate, Index rti,
}
/*
* For UPDATE/DELETE we have to return tid of actual row we're
* executing PQ for.
* For UPDATE/DELETE we have to return tid of actual row we're executing
* PQ for.
*/
*tid = tuple.t_self;
@ -1974,10 +1965,10 @@ EvalPlanQual(EState *estate, Index rti,
}
/*
* If this is request for another RTE - Ra, - then we have to check
* wasn't PlanQual requested for Ra already and if so then Ra' row was
* updated again and we have to re-start old execution for Ra and
* forget all what we done after Ra was suspended. Cool? -:))
* If this is request for another RTE - Ra, - then we have to check wasn't
* PlanQual requested for Ra already and if so then Ra' row was updated
* again and we have to re-start old execution for Ra and forget all what
* we done after Ra was suspended. Cool? -:))
*/
if (epq != NULL && epq->rti != rti &&
epq->estate->es_evTuple[rti - 1] != NULL)
@ -1999,8 +1990,8 @@ EvalPlanQual(EState *estate, Index rti,
}
/*
* If we are requested for another RTE then we have to suspend
* execution of current PlanQual and start execution for new one.
* If we are requested for another RTE then we have to suspend execution
* of current PlanQual and start execution for new one.
*/
if (epq == NULL || epq->rti != rti)
{
@ -2031,18 +2022,17 @@ EvalPlanQual(EState *estate, Index rti,
Assert(epq->rti == rti);
/*
* Ok - we're requested for the same RTE. Unfortunately we still have
* to end and restart execution of the plan, because ExecReScan
* wouldn't ensure that upper plan nodes would reset themselves. We
* could make that work if insertion of the target tuple were
* integrated with the Param mechanism somehow, so that the upper plan
* nodes know that their children's outputs have changed.
* Ok - we're requested for the same RTE. Unfortunately we still have to
* end and restart execution of the plan, because ExecReScan wouldn't
* ensure that upper plan nodes would reset themselves. We could make
* that work if insertion of the target tuple were integrated with the
* Param mechanism somehow, so that the upper plan nodes know that their
* children's outputs have changed.
*
* Note that the stack of free evalPlanQual nodes is quite useless at the
* moment, since it only saves us from pallocing/releasing the
* evalPlanQual nodes themselves. But it will be useful once we
* implement ReScan instead of end/restart for re-using PlanQual
* nodes.
* evalPlanQual nodes themselves. But it will be useful once we implement
* ReScan instead of end/restart for re-using PlanQual nodes.
*/
if (endNode)
{
@ -2055,15 +2045,14 @@ EvalPlanQual(EState *estate, Index rti,
*
* Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
* instead copy down changeable state from the top plan (including
* es_result_relation_info, es_junkFilter) and reset locally
* changeable state in the epq (including es_param_exec_vals,
* es_evTupleNull).
* es_result_relation_info, es_junkFilter) and reset locally changeable
* state in the epq (including es_param_exec_vals, es_evTupleNull).
*/
EvalPlanQualStart(epq, estate, epq->next);
/*
* free old RTE' tuple, if any, and store target tuple where
* relation's scan node will see it
* free old RTE' tuple, if any, and store target tuple where relation's
* scan node will see it
*/
epqstate = epq->estate;
if (epqstate->es_evTuple[rti - 1] != NULL)
@ -2171,10 +2160,10 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
/*
* The epqstates share the top query's copy of unchanging state such
* as the snapshot, rangetable, result-rel info, and external Param
* info. They need their own copies of local state, including a tuple
* table, es_param_exec_vals, etc.
* The epqstates share the top query's copy of unchanging state such as
* the snapshot, rangetable, result-rel info, and external Param info.
* They need their own copies of local state, including a tuple table,
* es_param_exec_vals, etc.
*/
epqstate->es_direction = ForwardScanDirection;
epqstate->es_snapshot = estate->es_snapshot;
@ -2199,9 +2188,9 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
epqstate->es_topPlan = estate->es_topPlan;
/*
* Each epqstate must have its own es_evTupleNull state, but all the
* stack entries share es_evTuple state. This allows sub-rechecks to
* inherit the value being examined by an outer recheck.
* Each epqstate must have its own es_evTupleNull state, but all the stack
* entries share es_evTuple state. This allows sub-rechecks to inherit
* the value being examined by an outer recheck.
*/
epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
if (priorepq == NULL)

View File

@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execProcnode.c,v 1.50 2005/04/19 22:35:11 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/execProcnode.c,v 1.51 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -240,8 +240,8 @@ ExecInitNode(Plan *node, EState *estate)
}
/*
* Initialize any initPlans present in this node. The planner put
* them in a separate list for us.
* Initialize any initPlans present in this node. The planner put them in
* a separate list for us.
*/
subps = NIL;
foreach(l, node->initPlan)
@ -258,9 +258,9 @@ ExecInitNode(Plan *node, EState *estate)
/*
* Initialize any subPlans present in this node. These were found by
* ExecInitExpr during initialization of the PlanState. Note we must
* do this after initializing initPlans, in case their arguments
* contain subPlans (is that actually possible? perhaps not).
* ExecInitExpr during initialization of the PlanState. Note we must do
* this after initializing initPlans, in case their arguments contain
* subPlans (is that actually possible? perhaps not).
*/
foreach(l, result->subPlan)
{
@ -422,7 +422,7 @@ ExecProcNode(PlanState *node)
Node *
MultiExecProcNode(PlanState *node)
{
Node *result;
Node *result;
CHECK_FOR_INTERRUPTS();
@ -431,9 +431,9 @@ MultiExecProcNode(PlanState *node)
switch (nodeTag(node))
{
/*
* Only node types that actually support multiexec will be listed
*/
/*
* Only node types that actually support multiexec will be listed
*/
case T_HashState:
result = MultiExecHash((HashState *) node);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.180 2005/06/26 22:05:36 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.181 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -89,8 +89,8 @@ static Datum ExecEvalOr(BoolExprState *orExpr, ExprContext *econtext,
static Datum ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalConvertRowtype(ConvertRowtypeExprState *cstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCaseTestExpr(ExprState *exprstate,
@ -106,8 +106,8 @@ static Datum ExecEvalCoalesce(CoalesceExprState *coalesceExpr,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalMinMax(MinMaxExprState *minmaxExpr,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalNullIf(FuncExprState *nullIfExpr,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
@ -243,8 +243,8 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
isDone));
/*
* If refexpr yields NULL, and it's a fetch, then result is NULL. In
* the assignment case, we'll cons up something below.
* If refexpr yields NULL, and it's a fetch, then result is NULL. In the
* assignment case, we'll cons up something below.
*/
if (*isNull)
{
@ -298,8 +298,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
NULL));
/*
* If any index expr yields NULL, result is NULL or source
* array
* If any index expr yields NULL, result is NULL or source array
*/
if (eisnull)
{
@ -326,13 +325,12 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
/*
* Evaluate the value to be assigned into the array.
*
* XXX At some point we'll need to look into making the old value of
* the array element available via CaseTestExpr, as is done by
* ExecEvalFieldStore. This is not needed now but will be needed
* to support arrays of composite types; in an assignment to a
* field of an array member, the parser would generate a
* FieldStore that expects to fetch its input tuple via
* CaseTestExpr.
* XXX At some point we'll need to look into making the old value of the
* array element available via CaseTestExpr, as is done by
* ExecEvalFieldStore. This is not needed now but will be needed to
* support arrays of composite types; in an assignment to a field of
* an array member, the parser would generate a FieldStore that
* expects to fetch its input tuple via CaseTestExpr.
*/
sourceData = ExecEvalExpr(astate->refassgnexpr,
econtext,
@ -340,19 +338,18 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
NULL);
/*
* For now, can't cope with inserting NULL into an array, so make
* it a no-op per discussion above...
* For now, can't cope with inserting NULL into an array, so make it a
* no-op per discussion above...
*/
if (eisnull)
return PointerGetDatum(array_source);
/*
* For an assignment, if all the subscripts and the input
* expression are non-null but the original array is null, then
* substitute an empty (zero-dimensional) array and proceed with
* the assignment. This only works for varlena arrays, though; for
* fixed-length array types we punt and return the null input
* array.
* For an assignment, if all the subscripts and the input expression
* are non-null but the original array is null, then substitute an
* empty (zero-dimensional) array and proceed with the assignment.
* This only works for varlena arrays, though; for fixed-length array
* types we punt and return the null input array.
*/
if (*isNull)
{
@ -379,7 +376,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
else
resultArray = array_set_slice(array_source, i,
upper.indx, lower.indx,
(ArrayType *) DatumGetPointer(sourceData),
(ArrayType *) DatumGetPointer(sourceData),
astate->refattrlength,
astate->refelemlength,
astate->refelembyval,
@ -451,10 +448,10 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
/*
* Get the slot and attribute number we want
*
* The asserts check that references to system attributes only appear at
* the level of a relation scan; at higher levels, system attributes
* must be treated as ordinary variables (since we no longer have
* access to the original tuple).
* The asserts check that references to system attributes only appear at the
* level of a relation scan; at higher levels, system attributes must be
* treated as ordinary variables (since we no longer have access to the
* original tuple).
*/
attnum = variable->varattno;
@ -477,6 +474,7 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
}
#ifdef USE_ASSERT_CHECKING
/*
* Some checks that are only applied for user attribute numbers (bogus
* system attnums will be caught inside slot_getattr).
@ -491,11 +489,10 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
Assert(attnum <= tuple_type->natts);
/*
* This assert checks that the datatype the plan expects to get
* (as told by our "variable" argument) is in fact the datatype of
* the attribute being fetched (as seen in the current context,
* identified by our "econtext" argument). Otherwise crashes are
* likely.
* This assert checks that the datatype the plan expects to get (as
* told by our "variable" argument) is in fact the datatype of the
* attribute being fetched (as seen in the current context, identified
* by our "econtext" argument). Otherwise crashes are likely.
*
* Note that we can't check dropped columns, since their atttypid has
* been zeroed.
@ -503,7 +500,7 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
Assert(variable->vartype == tuple_type->attrs[attnum - 1]->atttypid ||
tuple_type->attrs[attnum - 1]->attisdropped);
}
#endif /* USE_ASSERT_CHECKING */
#endif /* USE_ASSERT_CHECKING */
return slot_getattr(slot, attnum, isNull);
}
@ -559,9 +556,8 @@ ExecEvalParam(ExprState *exprstate, ExprContext *econtext,
if (thisParamKind == PARAM_EXEC)
{
/*
* PARAM_EXEC params (internal executor parameters) are stored in
* the ecxt_param_exec_vals array, and can be accessed by array
* index.
* PARAM_EXEC params (internal executor parameters) are stored in the
* ecxt_param_exec_vals array, and can be accessed by array index.
*/
ParamExecData *prm;
@ -579,8 +575,7 @@ ExecEvalParam(ExprState *exprstate, ExprContext *econtext,
else
{
/*
* All other parameter types must be sought in
* ecxt_param_list_info.
* All other parameter types must be sought in ecxt_param_list_info.
*/
ParamListInfo paramInfo;
@ -641,9 +636,9 @@ GetAttributeByNum(HeapTupleHeader tuple,
tupDesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
/*
* heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set
* all the fields in the struct just in case user tries to inspect
* system columns.
* heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set all
* the fields in the struct just in case user tries to inspect system
* columns.
*/
tmptup.t_len = HeapTupleHeaderGetDatumLength(tuple);
ItemPointerSetInvalid(&(tmptup.t_self));
@ -699,9 +694,9 @@ GetAttributeByName(HeapTupleHeader tuple, const char *attname, bool *isNull)
elog(ERROR, "attribute \"%s\" does not exist", attname);
/*
* heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set
* all the fields in the struct just in case user tries to inspect
* system columns.
* heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set all
* the fields in the struct just in case user tries to inspect system
* columns.
*/
tmptup.t_len = HeapTupleHeaderGetDatumLength(tuple);
ItemPointerSetInvalid(&(tmptup.t_self));
@ -730,9 +725,9 @@ init_fcache(Oid foid, FuncExprState *fcache, MemoryContext fcacheCxt)
/*
* Safety check on nargs. Under normal circumstances this should never
* fail, as parser should check sooner. But possibly it might fail
* if server has been compiled with FUNC_MAX_ARGS smaller than some
* functions declared in pg_proc?
* fail, as parser should check sooner. But possibly it might fail if
* server has been compiled with FUNC_MAX_ARGS smaller than some functions
* declared in pg_proc?
*/
if (list_length(fcache->args) > FUNC_MAX_ARGS)
ereport(ERROR,
@ -793,10 +788,9 @@ ExecEvalFuncArgs(FunctionCallInfo fcinfo,
if (thisArgIsDone != ExprSingleResult)
{
/*
* We allow only one argument to have a set value; we'd need
* much more complexity to keep track of multiple set
* arguments (cf. ExecTargetList) and it doesn't seem worth
* it.
* We allow only one argument to have a set value; we'd need much
* more complexity to keep track of multiple set arguments (cf.
* ExecTargetList) and it doesn't seem worth it.
*/
if (argIsDone != ExprSingleResult)
ereport(ERROR,
@ -835,11 +829,10 @@ ExecMakeFunctionResult(FuncExprState *fcache,
check_stack_depth();
/*
* arguments is a list of expressions to evaluate before passing to
* the function manager. We skip the evaluation if it was already
* done in the previous call (ie, we are continuing the evaluation of
* a set-valued function). Otherwise, collect the current argument
* values into fcinfo.
* arguments is a list of expressions to evaluate before passing to the
* function manager. We skip the evaluation if it was already done in the
* previous call (ie, we are continuing the evaluation of a set-valued
* function). Otherwise, collect the current argument values into fcinfo.
*/
if (!fcache->setArgsValid)
{
@ -870,8 +863,7 @@ ExecMakeFunctionResult(FuncExprState *fcache,
}
/*
* If function returns set, prepare a resultinfo node for
* communication
* If function returns set, prepare a resultinfo node for communication
*/
if (fcache->func.fn_retset)
{
@ -887,14 +879,14 @@ ExecMakeFunctionResult(FuncExprState *fcache,
}
/*
* now return the value gotten by calling the function manager,
* passing the function the evaluated parameter values.
* now return the value gotten by calling the function manager, passing
* the function the evaluated parameter values.
*/
if (fcache->func.fn_retset || hasSetArg)
{
/*
* We need to return a set result. Complain if caller not ready
* to accept one.
* We need to return a set result. Complain if caller not ready to
* accept one.
*/
if (isDone == NULL)
ereport(ERROR,
@ -902,18 +894,18 @@ ExecMakeFunctionResult(FuncExprState *fcache,
errmsg("set-valued function called in context that cannot accept a set")));
/*
* This loop handles the situation where we have both a set
* argument and a set-valued function. Once we have exhausted the
* function's value(s) for a particular argument value, we have to
* get the next argument value and start the function over again.
* We might have to do it more than once, if the function produces
* an empty result set for a particular input value.
* This loop handles the situation where we have both a set argument
* and a set-valued function. Once we have exhausted the function's
* value(s) for a particular argument value, we have to get the next
* argument value and start the function over again. We might have to
* do it more than once, if the function produces an empty result set
* for a particular input value.
*/
for (;;)
{
/*
* If function is strict, and there are any NULL arguments,
* skip calling the function (at least for this set of args).
* If function is strict, and there are any NULL arguments, skip
* calling the function (at least for this set of args).
*/
bool callit = true;
@ -948,8 +940,8 @@ ExecMakeFunctionResult(FuncExprState *fcache,
{
/*
* Got a result from current argument. If function itself
* returns set, save the current argument values to re-use
* on the next call.
* returns set, save the current argument values to re-use on
* the next call.
*/
if (fcache->func.fn_retset && *isDone == ExprMultipleResult)
{
@ -961,7 +953,7 @@ ExecMakeFunctionResult(FuncExprState *fcache,
{
RegisterExprContextCallback(econtext,
ShutdownFuncExpr,
PointerGetDatum(fcache));
PointerGetDatum(fcache));
fcache->shutdown_reg = true;
}
}
@ -992,8 +984,8 @@ ExecMakeFunctionResult(FuncExprState *fcache,
}
/*
* If we reach here, loop around to run the function on the
* new argument.
* If we reach here, loop around to run the function on the new
* argument.
*/
}
}
@ -1003,9 +995,9 @@ ExecMakeFunctionResult(FuncExprState *fcache,
* Non-set case: much easier.
*
* We change the ExprState function pointer to use the simpler
* ExecMakeFunctionResultNoSets on subsequent calls. This amounts
* to assuming that no argument can return a set if it didn't do
* so the first time.
* ExecMakeFunctionResultNoSets on subsequent calls. This amounts to
* assuming that no argument can return a set if it didn't do so the
* first time.
*/
fcache->xprstate.evalfunc = (ExprStateEvalFunc) ExecMakeFunctionResultNoSets;
@ -1074,8 +1066,8 @@ ExecMakeFunctionResultNoSets(FuncExprState *fcache,
InitFunctionCallInfoData(fcinfo, &(fcache->func), i, NULL, NULL);
/*
* If function is strict, and there are any NULL arguments, skip
* calling the function and return NULL.
* If function is strict, and there are any NULL arguments, skip calling
* the function and return NULL.
*/
if (fcache->func.fn_strict)
{
@ -1100,7 +1092,7 @@ ExecMakeFunctionResultNoSets(FuncExprState *fcache,
* ExecMakeTableFunctionResult
*
* Evaluate a table function, producing a materialized result in a Tuplestore
* object. *returnDesc is set to the tupledesc actually returned by the
* object. *returnDesc is set to the tupledesc actually returned by the
* function, or NULL if it didn't provide one.
*/
Tuplestorestate *
@ -1130,11 +1122,11 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
get_typtype(funcrettype) == 'c');
/*
* Prepare a resultinfo node for communication. We always do this
* even if not expecting a set result, so that we can pass
* expectedDesc. In the generic-expression case, the expression
* doesn't actually get to see the resultinfo, but set it up anyway
* because we use some of the fields as our own state variables.
* Prepare a resultinfo node for communication. We always do this even if
* not expecting a set result, so that we can pass expectedDesc. In the
* generic-expression case, the expression doesn't actually get to see the
* resultinfo, but set it up anyway because we use some of the fields as
* our own state variables.
*/
InitFunctionCallInfoData(fcinfo, NULL, 0, NULL, (Node *) &rsinfo);
rsinfo.type = T_ReturnSetInfo;
@ -1147,14 +1139,14 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
rsinfo.setDesc = NULL;
/*
* Normally the passed expression tree will be a FuncExprState, since
* the grammar only allows a function call at the top level of a table
* function reference. However, if the function doesn't return set
* then the planner might have replaced the function call via
* constant-folding or inlining. So if we see any other kind of
* expression node, execute it via the general ExecEvalExpr() code;
* the only difference is that we don't get a chance to pass a special
* ReturnSetInfo to any functions buried in the expression.
* Normally the passed expression tree will be a FuncExprState, since the
* grammar only allows a function call at the top level of a table
* function reference. However, if the function doesn't return set then
* the planner might have replaced the function call via constant-folding
* or inlining. So if we see any other kind of expression node, execute
* it via the general ExecEvalExpr() code; the only difference is that we
* don't get a chance to pass a special ReturnSetInfo to any functions
* buried in the expression.
*/
if (funcexpr && IsA(funcexpr, FuncExprState) &&
IsA(funcexpr->expr, FuncExpr))
@ -1182,9 +1174,9 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
* Evaluate the function's argument list.
*
* Note: ideally, we'd do this in the per-tuple context, but then the
* argument values would disappear when we reset the context in
* the inner loop. So do it in caller context. Perhaps we should
* make a separate context just to hold the evaluated arguments?
* argument values would disappear when we reset the context in the
* inner loop. So do it in caller context. Perhaps we should make a
* separate context just to hold the evaluated arguments?
*/
fcinfo.flinfo = &(fcache->func);
argDone = ExecEvalFuncArgs(&fcinfo, fcache->args, econtext);
@ -1217,8 +1209,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
}
/*
* Switch to short-lived context for calling the function or
* expression.
* Switch to short-lived context for calling the function or expression.
*/
MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
@ -1232,9 +1223,9 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
HeapTuple tuple;
/*
* reset per-tuple memory context before each call of the function
* or expression. This cleans up any local memory the function may
* leak when called.
* reset per-tuple memory context before each call of the function or
* expression. This cleans up any local memory the function may leak
* when called.
*/
ResetExprContext(econtext);
@ -1261,12 +1252,12 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
break;
/*
* Can't do anything very useful with NULL rowtype values.
* For a function returning set, we consider this a protocol
* violation (but another alternative would be to just ignore
* the result and "continue" to get another row). For a function
* not returning set, we fall out of the loop; we'll cons up
* an all-nulls result row below.
* Can't do anything very useful with NULL rowtype values. For a
* function returning set, we consider this a protocol violation
* (but another alternative would be to just ignore the result and
* "continue" to get another row). For a function not returning
* set, we fall out of the loop; we'll cons up an all-nulls result
* row below.
*/
if (returnsTuple && fcinfo.isnull)
{
@ -1278,8 +1269,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
}
/*
* If first time through, build tupdesc and tuplestore for
* result
* If first time through, build tupdesc and tuplestore for result
*/
if (first_time)
{
@ -1287,15 +1277,14 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
if (returnsTuple)
{
/*
* Use the type info embedded in the rowtype Datum to
* look up the needed tupdesc. Make a copy for the
* query.
* Use the type info embedded in the rowtype Datum to look
* up the needed tupdesc. Make a copy for the query.
*/
HeapTupleHeader td;
td = DatumGetHeapTupleHeader(result);
tupdesc = lookup_rowtype_tupdesc(HeapTupleHeaderGetTypeId(td),
HeapTupleHeaderGetTypMod(td));
HeapTupleHeaderGetTypMod(td));
tupdesc = CreateTupleDescCopy(tupdesc);
}
else
@ -1507,7 +1496,7 @@ ExecEvalDistinct(FuncExprState *fcache,
if (argDone != ExprSingleResult)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("IS DISTINCT FROM does not support set arguments")));
errmsg("IS DISTINCT FROM does not support set arguments")));
Assert(fcinfo.nargs == 2);
if (fcinfo.argnull[0] && fcinfo.argnull[1])
@ -1580,12 +1569,12 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
if (argDone != ExprSingleResult)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("op ANY/ALL (array) does not support set arguments")));
errmsg("op ANY/ALL (array) does not support set arguments")));
Assert(fcinfo.nargs == 2);
/*
* If the array is NULL then we return NULL --- it's not very
* meaningful to do anything else, even if the operator isn't strict.
* If the array is NULL then we return NULL --- it's not very meaningful
* to do anything else, even if the operator isn't strict.
*/
if (fcinfo.argnull[1])
{
@ -1598,18 +1587,17 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
/*
* If the array is empty, we return either FALSE or TRUE per the useOr
* flag. This is correct even if the scalar is NULL; since we would
* evaluate the operator zero times, it matters not whether it would
* want to return NULL.
* evaluate the operator zero times, it matters not whether it would want
* to return NULL.
*/
nitems = ArrayGetNItems(ARR_NDIM(arr), ARR_DIMS(arr));
if (nitems <= 0)
return BoolGetDatum(!useOr);
/*
* If the scalar is NULL, and the function is strict, return NULL.
* This is just to avoid having to test for strictness inside the
* loop. (XXX but if arrays could have null elements, we'd need a
* test anyway.)
* If the scalar is NULL, and the function is strict, return NULL. This is
* just to avoid having to test for strictness inside the loop. (XXX but
* if arrays could have null elements, we'd need a test anyway.)
*/
if (fcinfo.argnull[0] && sstate->fxprstate.func.fn_strict)
{
@ -1618,9 +1606,8 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
}
/*
* We arrange to look up info about the element type only once per
* series of calls, assuming the element type doesn't change
* underneath us.
* We arrange to look up info about the element type only once per series
* of calls, assuming the element type doesn't change underneath us.
*/
if (sstate->element_type != ARR_ELEMTYPE(arr))
{
@ -1711,15 +1698,15 @@ ExecEvalNot(BoolExprState *notclause, ExprContext *econtext,
expr_value = ExecEvalExpr(clause, econtext, isNull, NULL);
/*
* if the expression evaluates to null, then we just cascade the null
* back to whoever called us.
* if the expression evaluates to null, then we just cascade the null back
* to whoever called us.
*/
if (*isNull)
return expr_value;
/*
* evaluation of 'not' is simple.. expr is false, then return 'true'
* and vice versa.
* evaluation of 'not' is simple.. expr is false, then return 'true' and
* vice versa.
*/
return BoolGetDatum(!DatumGetBool(expr_value));
}
@ -1742,18 +1729,17 @@ ExecEvalOr(BoolExprState *orExpr, ExprContext *econtext,
AnyNull = false;
/*
* If any of the clauses is TRUE, the OR result is TRUE regardless of
* the states of the rest of the clauses, so we can stop evaluating
* and return TRUE immediately. If none are TRUE and one or more is
* NULL, we return NULL; otherwise we return FALSE. This makes sense
* when you interpret NULL as "don't know": if we have a TRUE then the
* OR is TRUE even if we aren't sure about some of the other inputs.
* If all the known inputs are FALSE, but we have one or more "don't
* knows", then we have to report that we "don't know" what the OR's
* result should be --- perhaps one of the "don't knows" would have
* been TRUE if we'd known its value. Only when all the inputs are
* known to be FALSE can we state confidently that the OR's result is
* FALSE.
* If any of the clauses is TRUE, the OR result is TRUE regardless of the
* states of the rest of the clauses, so we can stop evaluating and return
* TRUE immediately. If none are TRUE and one or more is NULL, we return
* NULL; otherwise we return FALSE. This makes sense when you interpret
* NULL as "don't know": if we have a TRUE then the OR is TRUE even if we
* aren't sure about some of the other inputs. If all the known inputs are
* FALSE, but we have one or more "don't knows", then we have to report
* that we "don't know" what the OR's result should be --- perhaps one of
* the "don't knows" would have been TRUE if we'd known its value. Only
* when all the inputs are known to be FALSE can we state confidently that
* the OR's result is FALSE.
*/
foreach(clause, clauses)
{
@ -1794,12 +1780,12 @@ ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext,
AnyNull = false;
/*
* If any of the clauses is FALSE, the AND result is FALSE regardless
* of the states of the rest of the clauses, so we can stop evaluating
* and return FALSE immediately. If none are FALSE and one or more is
* NULL, we return NULL; otherwise we return TRUE. This makes sense
* when you interpret NULL as "don't know", using the same sort of
* reasoning as for OR, above.
* If any of the clauses is FALSE, the AND result is FALSE regardless of
* the states of the rest of the clauses, so we can stop evaluating and
* return FALSE immediately. If none are FALSE and one or more is NULL,
* we return NULL; otherwise we return TRUE. This makes sense when you
* interpret NULL as "don't know", using the same sort of reasoning as for
* OR, above.
*/
foreach(clause, clauses)
@ -1826,7 +1812,7 @@ ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext,
/* ----------------------------------------------------------------
* ExecEvalConvertRowtype
*
* Evaluate a rowtype coercion operation. This may require
* Evaluate a rowtype coercion operation. This may require
* rearranging field positions.
* ----------------------------------------------------------------
*/
@ -1865,10 +1851,9 @@ ExecEvalConvertRowtype(ConvertRowtypeExprState *cstate,
tmptup.t_data = tuple;
/*
* Extract all the values of the old tuple, offsetting the arrays
* so that invalues[0] is NULL and invalues[1] is the first
* source attribute; this exactly matches the numbering convention
* in attrMap.
* Extract all the values of the old tuple, offsetting the arrays so that
* invalues[0] is NULL and invalues[1] is the first source attribute; this
* exactly matches the numbering convention in attrMap.
*/
heap_deform_tuple(&tmptup, cstate->indesc, invalues + 1, inisnull + 1);
invalues[0] = (Datum) 0;
@ -1915,10 +1900,10 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
*isDone = ExprSingleResult;
/*
* If there's a test expression, we have to evaluate it and save the
* value where the CaseTestExpr placeholders can find it. We must save
* and restore prior setting of econtext's caseValue fields, in case
* this node is itself within a larger CASE.
* If there's a test expression, we have to evaluate it and save the value
* where the CaseTestExpr placeholders can find it. We must save and
* restore prior setting of econtext's caseValue fields, in case this node
* is itself within a larger CASE.
*/
save_datum = econtext->caseValue_datum;
save_isNull = econtext->caseValue_isNull;
@ -1927,14 +1912,14 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
{
econtext->caseValue_datum = ExecEvalExpr(caseExpr->arg,
econtext,
&econtext->caseValue_isNull,
&econtext->caseValue_isNull,
NULL);
}
/*
* we evaluate each of the WHEN clauses in turn, as soon as one is
* true we return the corresponding result. If none are true then we
* return the value of the default clause, or NULL if there is none.
* we evaluate each of the WHEN clauses in turn, as soon as one is true we
* return the corresponding result. If none are true then we return the
* value of the default clause, or NULL if there is none.
*/
foreach(clause, clauses)
{
@ -1947,9 +1932,9 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
NULL);
/*
* if we have a true test, then we return the result, since the
* case statement is satisfied. A NULL result from the test is
* not considered true.
* if we have a true test, then we return the result, since the case
* statement is satisfied. A NULL result from the test is not
* considered true.
*/
if (DatumGetBool(clause_value) && !*isNull)
{
@ -2098,7 +2083,7 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("cannot merge incompatible arrays"),
errdetail("Array with element type %s cannot be "
"included in ARRAY construct with element type %s.",
"included in ARRAY construct with element type %s.",
format_type_be(ARR_ELEMTYPE(array)),
format_type_be(element_type))));
@ -2110,8 +2095,8 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
if (ndims <= 0 || ndims > MAXDIM)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("number of array dimensions (%d) exceeds " \
"the maximum allowed (%d)", ndims, MAXDIM)));
errmsg("number of array dimensions (%d) exceeds " \
"the maximum allowed (%d)", ndims, MAXDIM)));
elem_dims = (int *) palloc(elem_ndims * sizeof(int));
memcpy(elem_dims, ARR_DIMS(array), elem_ndims * sizeof(int));
@ -2130,8 +2115,8 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
elem_ndims * sizeof(int)) != 0)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("multidimensional arrays must have array "
"expressions with matching dimensions")));
errmsg("multidimensional arrays must have array "
"expressions with matching dimensions")));
}
elem_ndatabytes = ARR_SIZE(array) - ARR_OVERHEAD(elem_ndims);
@ -2258,10 +2243,10 @@ static Datum
ExecEvalMinMax(MinMaxExprState *minmaxExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone)
{
Datum result = (Datum) 0;
Datum result = (Datum) 0;
MinMaxOp op = ((MinMaxExpr *) minmaxExpr->xprstate.expr)->op;
FunctionCallInfoData locfcinfo;
ListCell *arg;
ListCell *arg;
if (isDone)
*isDone = ExprSingleResult;
@ -2295,7 +2280,7 @@ ExecEvalMinMax(MinMaxExprState *minmaxExpr, ExprContext *econtext,
locfcinfo.arg[1] = value;
locfcinfo.isnull = false;
cmpresult = DatumGetInt32(FunctionCallInvoke(&locfcinfo));
if (locfcinfo.isnull) /* probably should not happen */
if (locfcinfo.isnull) /* probably should not happen */
continue;
if (cmpresult > 0 && op == IS_LEAST)
result = value;
@ -2531,8 +2516,8 @@ ExecEvalCoerceToDomain(CoerceToDomainState *cstate, ExprContext *econtext,
if (*isNull)
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("domain %s does not allow null values",
format_type_be(ctest->resulttype))));
errmsg("domain %s does not allow null values",
format_type_be(ctest->resulttype))));
break;
case DOM_CONSTRAINT_CHECK:
{
@ -2545,8 +2530,7 @@ ExecEvalCoerceToDomain(CoerceToDomainState *cstate, ExprContext *econtext,
* Set up value to be returned by CoerceToDomainValue
* nodes. We must save and restore prior setting of
* econtext's domainValue fields, in case this node is
* itself within a check expression for another
* domain.
* itself within a check expression for another domain.
*/
save_datum = econtext->domainValue_datum;
save_isNull = econtext->domainValue_isNull;
@ -2647,9 +2631,9 @@ ExecEvalFieldSelect(FieldSelectState *fstate,
}
/*
* heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set
* all the fields in the struct just in case user tries to inspect
* system columns.
* heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set all
* the fields in the struct just in case user tries to inspect system
* columns.
*/
tmptup.t_len = HeapTupleHeaderGetDatumLength(tuple);
ItemPointerSetInvalid(&(tmptup.t_self));
@ -2715,8 +2699,8 @@ ExecEvalFieldStore(FieldStoreState *fstate,
if (!*isNull)
{
/*
* heap_deform_tuple needs a HeapTuple not a bare HeapTupleHeader.
* We set all the fields in the struct just in case.
* heap_deform_tuple needs a HeapTuple not a bare HeapTupleHeader. We
* set all the fields in the struct just in case.
*/
HeapTupleHeader tuphdr;
HeapTupleData tmptup;
@ -2749,11 +2733,11 @@ ExecEvalFieldStore(FieldStoreState *fstate,
Assert(fieldnum > 0 && fieldnum <= tupDesc->natts);
/*
* Use the CaseTestExpr mechanism to pass down the old value of
* the field being replaced; this is useful in case we have a
* nested field update situation. It's safe to reuse the CASE
* mechanism because there cannot be a CASE between here and where
* the value would be needed.
* Use the CaseTestExpr mechanism to pass down the old value of the
* field being replaced; this is useful in case we have a nested field
* update situation. It's safe to reuse the CASE mechanism because
* there cannot be a CASE between here and where the value would be
* needed.
*/
econtext->caseValue_datum = values[fieldnum - 1];
econtext->caseValue_isNull = isnull[fieldnum - 1];
@ -2895,8 +2879,8 @@ ExecInitExpr(Expr *node, PlanState *parent)
/*
* Complain if the aggregate's argument contains any
* aggregates; nested agg functions are semantically
* nonsensical. (This should have been caught
* earlier, but we defend against it here anyway.)
* nonsensical. (This should have been caught earlier,
* but we defend against it here anyway.)
*/
if (naggs != aggstate->numaggs)
ereport(ERROR,
@ -3020,9 +3004,8 @@ ExecInitExpr(Expr *node, PlanState *parent)
elog(ERROR, "SubPlan found with no parent plan");
/*
* Here we just add the SubPlanState nodes to
* parent->subPlan. The subplans will be initialized
* later.
* Here we just add the SubPlanState nodes to parent->subPlan.
* The subplans will be initialized later.
*/
parent->subPlan = lcons(sstate, parent->subPlan);
sstate->sub_estate = NULL;
@ -3073,8 +3056,8 @@ ExecInitExpr(Expr *node, PlanState *parent)
{
ConvertRowtypeExpr *convert = (ConvertRowtypeExpr *) node;
ConvertRowtypeExprState *cstate = makeNode(ConvertRowtypeExprState);
int i;
int n;
int i;
int n;
cstate->xprstate.evalfunc = (ExprStateEvalFunc) ExecEvalConvertRowtype;
cstate->arg = ExecInitExpr(convert->arg, parent);
@ -3095,7 +3078,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
int j;
if (att->attisdropped)
continue; /* attrMap[i] is already 0 */
continue; /* attrMap[i] is already 0 */
attname = NameStr(att->attname);
atttypid = att->atttypid;
atttypmod = att->atttypmod;
@ -3111,7 +3094,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
elog(ERROR, "attribute \"%s\" of type %s does not match corresponding attribute of type %s",
attname,
format_type_be(cstate->indesc->tdtypeid),
format_type_be(cstate->outdesc->tdtypeid));
format_type_be(cstate->outdesc->tdtypeid));
cstate->attrMap[i] = (AttrNumber) (j + 1);
break;
}
@ -3217,24 +3200,24 @@ ExecInitExpr(Expr *node, PlanState *parent)
if (!attrs[i]->attisdropped)
{
/*
* Guard against ALTER COLUMN TYPE on rowtype
* since the RowExpr was created. XXX should we
* check typmod too? Not sure we can be sure
* it'll be the same.
* Guard against ALTER COLUMN TYPE on rowtype since
* the RowExpr was created. XXX should we check
* typmod too? Not sure we can be sure it'll be the
* same.
*/
if (exprType((Node *) e) != attrs[i]->atttypid)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("ROW() column has type %s instead of type %s",
format_type_be(exprType((Node *) e)),
format_type_be(attrs[i]->atttypid))));
format_type_be(exprType((Node *) e)),
format_type_be(attrs[i]->atttypid))));
}
else
{
/*
* Ignore original expression and insert a NULL.
* We don't really care what type of NULL it is,
* so always make an int4 NULL.
* Ignore original expression and insert a NULL. We
* don't really care what type of NULL it is, so
* always make an int4 NULL.
*/
e = (Expr *) makeNullConst(INT4OID);
}
@ -3485,16 +3468,16 @@ ExecQual(List *qual, ExprContext *econtext, bool resultForNull)
oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
/*
* Evaluate the qual conditions one at a time. If we find a FALSE
* result, we can stop evaluating and return FALSE --- the AND result
* must be FALSE. Also, if we find a NULL result when resultForNull
* is FALSE, we can stop and return FALSE --- the AND result must be
* FALSE or NULL in that case, and the caller doesn't care which.
* Evaluate the qual conditions one at a time. If we find a FALSE result,
* we can stop evaluating and return FALSE --- the AND result must be
* FALSE. Also, if we find a NULL result when resultForNull is FALSE, we
* can stop and return FALSE --- the AND result must be FALSE or NULL in
* that case, and the caller doesn't care which.
*
* If we get to the end of the list, we can return TRUE. This will
* happen when the AND result is indeed TRUE, or when the AND result
* is NULL (one or more NULL subresult, with all the rest TRUE) and
* the caller has specified resultForNull = TRUE.
* If we get to the end of the list, we can return TRUE. This will happen
* when the AND result is indeed TRUE, or when the AND result is NULL (one
* or more NULL subresult, with all the rest TRUE) and the caller has
* specified resultForNull = TRUE.
*/
result = true;
@ -3637,8 +3620,7 @@ ExecTargetList(List *targetlist,
if (*isDone == ExprSingleResult)
{
/*
* all sets are done, so report that tlist expansion is
* complete.
* all sets are done, so report that tlist expansion is complete.
*/
*isDone = ExprEndResult;
MemoryContextSwitchTo(oldContext);
@ -3647,8 +3629,8 @@ ExecTargetList(List *targetlist,
else
{
/*
* We have some done and some undone sets. Restart the done
* ones so that we can deliver a tuple (if possible).
* We have some done and some undone sets. Restart the done ones
* so that we can deliver a tuple (if possible).
*/
foreach(tl, targetlist)
{
@ -3666,8 +3648,8 @@ ExecTargetList(List *targetlist,
if (itemIsDone[resind] == ExprEndResult)
{
/*
* Oh dear, this item is returning an empty set.
* Guess we can't make a tuple after all.
* Oh dear, this item is returning an empty set. Guess
* we can't make a tuple after all.
*/
*isDone = ExprEndResult;
break;
@ -3676,9 +3658,9 @@ ExecTargetList(List *targetlist,
}
/*
* If we cannot make a tuple because some sets are empty, we
* still have to cycle the nonempty sets to completion, else
* resources will not be released from subplans etc.
* If we cannot make a tuple because some sets are empty, we still
* have to cycle the nonempty sets to completion, else resources
* will not be released from subplans etc.
*
* XXX is that still necessary?
*/
@ -3741,8 +3723,8 @@ ExecVariableList(ProjectionInfo *projInfo,
projInfo->pi_lastScanVar);
/*
* Assign to result by direct extraction of fields from source
* slots ... a mite ugly, but fast ...
* Assign to result by direct extraction of fields from source slots ... a
* mite ugly, but fast ...
*/
for (i = list_length(projInfo->pi_targetlist) - 1; i >= 0; i--)
{
@ -3784,10 +3766,9 @@ ExecProject(ProjectionInfo *projInfo, ExprDoneCond *isDone)
slot = projInfo->pi_slot;
/*
* Clear any former contents of the result slot. This makes it
* safe for us to use the slot's Datum/isnull arrays as workspace.
* (Also, we can return the slot as-is if we decide no rows can
* be projected.)
* Clear any former contents of the result slot. This makes it safe for
* us to use the slot's Datum/isnull arrays as workspace. (Also, we can
* return the slot as-is if we decide no rows can be projected.)
*/
ExecClearTuple(slot);

View File

@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.36 2005/05/22 22:30:19 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.37 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -61,16 +61,16 @@ ExecScan(ScanState *node,
projInfo = node->ps.ps_ProjInfo;
/*
* If we have neither a qual to check nor a projection to do,
* just skip all the overhead and return the raw scan tuple.
* If we have neither a qual to check nor a projection to do, just skip
* all the overhead and return the raw scan tuple.
*/
if (!qual && !projInfo)
return (*accessMtd) (node);
/*
* Check to see if we're still projecting out tuples from a previous
* scan tuple (because there is a function-returning-set in the
* projection expressions). If so, try to project another one.
* Check to see if we're still projecting out tuples from a previous scan
* tuple (because there is a function-returning-set in the projection
* expressions). If so, try to project another one.
*/
if (node->ps.ps_TupFromTlist)
{
@ -84,15 +84,15 @@ ExecScan(ScanState *node,
/*
* Reset per-tuple memory context to free any expression evaluation
* storage allocated in the previous tuple cycle. Note this can't
* happen until we're done projecting out tuples from a scan tuple.
* storage allocated in the previous tuple cycle. Note this can't happen
* until we're done projecting out tuples from a scan tuple.
*/
econtext = node->ps.ps_ExprContext;
ResetExprContext(econtext);
/*
* get a tuple from the access method loop until we obtain a tuple
* which passes the qualification.
* get a tuple from the access method loop until we obtain a tuple which
* passes the qualification.
*/
for (;;)
{
@ -103,10 +103,10 @@ ExecScan(ScanState *node,
slot = (*accessMtd) (node);
/*
* if the slot returned by the accessMtd contains NULL, then it
* means there is nothing more to scan so we just return an empty
* slot, being careful to use the projection result slot so it has
* correct tupleDesc.
* if the slot returned by the accessMtd contains NULL, then it means
* there is nothing more to scan so we just return an empty slot,
* being careful to use the projection result slot so it has correct
* tupleDesc.
*/
if (TupIsNull(slot))
{
@ -125,8 +125,8 @@ ExecScan(ScanState *node,
* check that the current tuple satisfies the qual-clause
*
* check for non-nil qual here to avoid a function call to ExecQual()
* when the qual is nil ... saves only a few cycles, but they add
* up ...
* when the qual is nil ... saves only a few cycles, but they add up
* ...
*/
if (!qual || ExecQual(qual, econtext, false))
{
@ -136,10 +136,9 @@ ExecScan(ScanState *node,
if (projInfo)
{
/*
* Form a projection tuple, store it in the result tuple
* slot and return it --- unless we find we can project no
* tuples from this scan tuple, in which case continue
* scan.
* Form a projection tuple, store it in the result tuple slot
* and return it --- unless we find we can project no tuples
* from this scan tuple, in which case continue scan.
*/
resultSlot = ExecProject(projInfo, &isDone);
if (isDone != ExprEndResult)
@ -226,8 +225,8 @@ tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, TupleDesc tupdesc
return false; /* tlist too long */
/*
* If the plan context requires a particular hasoid setting, then that
* has to match, too.
* If the plan context requires a particular hasoid setting, then that has
* to match, too.
*/
if (ExecContextForcesOids(ps, &hasoid) &&
hasoid != tupdesc->tdhasoid)

View File

@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.87 2005/04/06 16:34:04 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.88 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -129,7 +129,7 @@ ExecCreateTupleTable(int tableSize)
* allocate the table itself
*/
newtable = (TupleTable) palloc(sizeof(TupleTableData) +
(tableSize - 1) * sizeof(TupleTableSlot));
(tableSize - 1) *sizeof(TupleTableSlot));
newtable->size = tableSize;
newtable->next = 0;
@ -175,10 +175,9 @@ ExecDropTupleTable(TupleTable table, /* tuple table */
Assert(table != NULL);
/*
* first free all the valid pointers in the tuple array and drop
* refcounts of any referenced buffers, if that's what the caller
* wants. (There is probably no good reason for the caller ever not
* to want it!)
* first free all the valid pointers in the tuple array and drop refcounts
* of any referenced buffers, if that's what the caller wants. (There is
* probably no good reason for the caller ever not to want it!)
*/
if (shouldFree)
{
@ -288,9 +287,9 @@ ExecAllocTableSlot(TupleTable table)
Assert(table != NULL);
/*
* We expect that the table was made big enough to begin with.
* We cannot reallocate it on the fly since previous plan nodes
* have already got pointers to individual entries.
* We expect that the table was made big enough to begin with. We cannot
* reallocate it on the fly since previous plan nodes have already got
* pointers to individual entries.
*/
if (table->next >= table->size)
elog(ERROR, "plan requires more slots than are available");
@ -322,8 +321,8 @@ ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */
ExecClearTuple(slot);
/*
* Release any old descriptor. Also release old Datum/isnull arrays
* if present (we don't bother to check if they could be re-used).
* Release any old descriptor. Also release old Datum/isnull arrays if
* present (we don't bother to check if they could be re-used).
*/
if (slot->tts_shouldFreeDesc)
FreeTupleDesc(slot->tts_tupleDescriptor);
@ -340,9 +339,8 @@ ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */
slot->tts_shouldFreeDesc = shouldFree;
/*
* Allocate Datum/isnull arrays of the appropriate size. These must
* have the same lifetime as the slot, so allocate in the slot's own
* context.
* Allocate Datum/isnull arrays of the appropriate size. These must have
* the same lifetime as the slot, so allocate in the slot's own context.
*/
slot->tts_values = (Datum *)
MemoryContextAlloc(slot->tts_mcxt, tupdesc->natts * sizeof(Datum));
@ -417,8 +415,8 @@ ExecStoreTuple(HeapTuple tuple,
slot->tts_tuple = tuple;
/*
* If tuple is on a disk page, keep the page pinned as long as we hold
* a pointer into it. We assume the caller already has such a pin.
* If tuple is on a disk page, keep the page pinned as long as we hold a
* pointer into it. We assume the caller already has such a pin.
*/
slot->tts_buffer = buffer;
if (BufferIsValid(buffer))
@ -621,21 +619,20 @@ ExecMaterializeSlot(TupleTableSlot *slot)
Assert(!slot->tts_isempty);
/*
* If we have a physical tuple, and it's locally palloc'd, we have
* nothing to do.
* If we have a physical tuple, and it's locally palloc'd, we have nothing
* to do.
*/
if (slot->tts_tuple && slot->tts_shouldFree)
return slot->tts_tuple;
/*
* Otherwise, copy or build a tuple, and then store it as the new slot
* value. (Note: tts_nvalid will be reset to zero here. There are
* cases in which this could be optimized but it's probably not worth
* worrying about.)
* value. (Note: tts_nvalid will be reset to zero here. There are cases
* in which this could be optimized but it's probably not worth worrying
* about.)
*
* We may be called in a context that is shorter-lived than the
* tuple slot, but we have to ensure that the materialized tuple
* will survive anyway.
* We may be called in a context that is shorter-lived than the tuple slot,
* but we have to ensure that the materialized tuple will survive anyway.
*/
oldContext = MemoryContextSwitchTo(slot->tts_mcxt);
newTuple = ExecCopySlotTuple(slot);
@ -663,9 +660,9 @@ ExecCopySlot(TupleTableSlot *dstslot, TupleTableSlot *srcslot)
MemoryContext oldContext;
/*
* There might be ways to optimize this when the source is virtual,
* but for now just always build a physical copy. Make sure it is
* in the right context.
* There might be ways to optimize this when the source is virtual, but
* for now just always build a physical copy. Make sure it is in the
* right context.
*/
oldContext = MemoryContextSwitchTo(dstslot->tts_mcxt);
newTuple = ExecCopySlotTuple(srcslot);
@ -893,8 +890,7 @@ TupleDescGetAttInMetadata(TupleDesc tupdesc)
attinmeta->tupdesc = BlessTupleDesc(tupdesc);
/*
* Gather info needed later to call the "in" function for each
* attribute
* Gather info needed later to call the "in" function for each attribute
*/
attinfuncinfo = (FmgrInfo *) palloc0(natts * sizeof(FmgrInfo));
attioparams = (Oid *) palloc0(natts * sizeof(Oid));
@ -974,8 +970,8 @@ BuildTupleFromCStrings(AttInMetadata *attinmeta, char **values)
tuple = heap_formtuple(tupdesc, dvalues, nulls);
/*
* Release locally palloc'd space. XXX would probably be good to
* pfree values of pass-by-reference datums, as well.
* Release locally palloc'd space. XXX would probably be good to pfree
* values of pass-by-reference datums, as well.
*/
pfree(dvalues);
pfree(nulls);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.125 2005/08/01 20:31:07 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.126 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -63,8 +63,8 @@ int NTupleReplaced;
int NTupleAppended;
int NTupleDeleted;
int NIndexTupleInserted;
extern int NIndexTupleProcessed; /* have to be defined in the
* access method level so that the
extern int NIndexTupleProcessed; /* have to be defined in the access
* method level so that the
* cinterface.a will link ok. */
@ -166,8 +166,8 @@ CreateExecutorState(void)
ALLOCSET_DEFAULT_MAXSIZE);
/*
* Make the EState node within the per-query context. This way, we
* don't need a separate pfree() operation for it at shutdown.
* Make the EState node within the per-query context. This way, we don't
* need a separate pfree() operation for it at shutdown.
*/
oldcontext = MemoryContextSwitchTo(qcontext);
@ -244,16 +244,16 @@ void
FreeExecutorState(EState *estate)
{
/*
* Shut down and free any remaining ExprContexts. We do this
* explicitly to ensure that any remaining shutdown callbacks get
* called (since they might need to release resources that aren't
* simply memory within the per-query memory context).
* Shut down and free any remaining ExprContexts. We do this explicitly
* to ensure that any remaining shutdown callbacks get called (since they
* might need to release resources that aren't simply memory within the
* per-query memory context).
*/
while (estate->es_exprcontexts)
{
/*
* XXX: seems there ought to be a faster way to implement this
* than repeated list_delete(), no?
* XXX: seems there ought to be a faster way to implement this than
* repeated list_delete(), no?
*/
FreeExprContext((ExprContext *) linitial(estate->es_exprcontexts));
/* FreeExprContext removed the list link for us */
@ -324,10 +324,9 @@ CreateExprContext(EState *estate)
econtext->ecxt_callbacks = NULL;
/*
* Link the ExprContext into the EState to ensure it is shut down when
* the EState is freed. Because we use lcons(), shutdowns will occur
* in reverse order of creation, which may not be essential but can't
* hurt.
* Link the ExprContext into the EState to ensure it is shut down when the
* EState is freed. Because we use lcons(), shutdowns will occur in
* reverse order of creation, which may not be essential but can't hurt.
*/
estate->es_exprcontexts = lcons(econtext, estate->es_exprcontexts);
@ -471,9 +470,9 @@ ExecAssignResultTypeFromTL(PlanState *planstate)
}
/*
* ExecTypeFromTL needs the parse-time representation of the tlist,
* not a list of ExprStates. This is good because some plan nodes
* don't bother to set up planstate->targetlist ...
* ExecTypeFromTL needs the parse-time representation of the tlist, not a
* list of ExprStates. This is good because some plan nodes don't bother
* to set up planstate->targetlist ...
*/
tupDesc = ExecTypeFromTL(planstate->plan->targetlist, hasoid);
ExecAssignResultType(planstate, tupDesc, true);
@ -518,8 +517,8 @@ ExecBuildProjectionInfo(List *targetList,
/*
* Determine whether the target list consists entirely of simple Var
* references (ie, references to non-system attributes). If so,
* we can use the simpler ExecVariableList instead of ExecTargetList.
* references (ie, references to non-system attributes). If so, we can
* use the simpler ExecVariableList instead of ExecTargetList.
*/
isVarList = true;
foreach(tl, targetList)
@ -545,18 +544,18 @@ ExecBuildProjectionInfo(List *targetList,
AttrNumber lastOuterVar = 0;
AttrNumber lastScanVar = 0;
projInfo->pi_itemIsDone = NULL; /* not needed */
projInfo->pi_itemIsDone = NULL; /* not needed */
projInfo->pi_varSlotOffsets = varSlotOffsets = (int *)
palloc0(len * sizeof(int));
projInfo->pi_varNumbers = varNumbers = (int *)
palloc0(len * sizeof(int));
/*
* Set up the data needed by ExecVariableList. The slots in which
* the variables can be found at runtime are denoted by the offsets
* of their slot pointers within the econtext. This rather grotty
* representation is needed because the caller may not have given
* us the real econtext yet (see hacks in nodeSubplan.c).
* Set up the data needed by ExecVariableList. The slots in which the
* variables can be found at runtime are denoted by the offsets of
* their slot pointers within the econtext. This rather grotty
* representation is needed because the caller may not have given us
* the real econtext yet (see hacks in nodeSubplan.c).
*/
foreach(tl, targetList)
{
@ -631,7 +630,7 @@ ExecAssignProjectionInfo(PlanState *planstate)
*
* However ... there is no particular need to do it during ExecEndNode,
* because FreeExecutorState will free any remaining ExprContexts within
* the EState. Letting FreeExecutorState do it allows the ExprContexts to
* the EState. Letting FreeExecutorState do it allows the ExprContexts to
* be freed in reverse order of creation, rather than order of creation as
* will happen if we delete them here, which saves O(N^2) work in the list
* cleanup inside FreeExprContext.
@ -641,8 +640,8 @@ void
ExecFreeExprContext(PlanState *planstate)
{
/*
* Per above discussion, don't actually delete the ExprContext.
* We do unlink it from the plan node, though.
* Per above discussion, don't actually delete the ExprContext. We do
* unlink it from the plan node, though.
*/
planstate->ps_ExprContext = NULL;
}
@ -774,13 +773,13 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo)
* to a new tablespace.
*
* If the index AM is not safe for concurrent updates, obtain an
* exclusive lock on the index to lock out other updaters as well
* as readers (index_beginscan places AccessShareLock).
* exclusive lock on the index to lock out other updaters as well as
* readers (index_beginscan places AccessShareLock).
*
* If there are multiple not-concurrent-safe indexes, all backends
* must lock the indexes in the same order or we will get deadlocks
* here. This is guaranteed by RelationGetIndexList(), which promises
* to return the index list in OID order.
* If there are multiple not-concurrent-safe indexes, all backends must
* lock the indexes in the same order or we will get deadlocks here.
* This is guaranteed by RelationGetIndexList(), which promises to
* return the index list in OID order.
*
* The locks will be released in ExecCloseIndices.
*/
@ -876,9 +875,8 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
heapRelation = resultRelInfo->ri_RelationDesc;
/*
* We will use the EState's per-tuple context for evaluating
* predicates and index expressions (creating it if it's not already
* there).
* We will use the EState's per-tuple context for evaluating predicates
* and index expressions (creating it if it's not already there).
*/
econtext = GetPerTupleExprContext(estate);
@ -903,8 +901,8 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
List *predicate;
/*
* If predicate state not set up yet, create it (in the
* estate's per-query context)
* If predicate state not set up yet, create it (in the estate's
* per-query context)
*/
predicate = indexInfo->ii_PredicateState;
if (predicate == NIL)
@ -921,8 +919,8 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
}
/*
* FormIndexDatum fills in its values and isnull parameters with
* the appropriate values for the column(s) of the index.
* FormIndexDatum fills in its values and isnull parameters with the
* appropriate values for the column(s) of the index.
*/
FormIndexDatum(indexInfo,
slot,
@ -931,14 +929,14 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
isnull);
/*
* The index AM does the rest. Note we suppress unique-index
* checks if we are being called from VACUUM, since VACUUM may
* need to move dead tuples that have the same keys as live ones.
* The index AM does the rest. Note we suppress unique-index checks
* if we are being called from VACUUM, since VACUUM may need to move
* dead tuples that have the same keys as live ones.
*/
index_insert(relationDescs[i], /* index relation */
values, /* array of index Datums */
isnull, /* null flags */
tupleid, /* tid of heap tuple */
values, /* array of index Datums */
isnull, /* null flags */
tupleid, /* tid of heap tuple */
heapRelation,
relationDescs[i]->rd_index->indisunique && !is_vacuum);
@ -959,14 +957,14 @@ UpdateChangedParamSet(PlanState *node, Bitmapset *newchg)
Bitmapset *parmset;
/*
* The plan node only depends on params listed in its allParam set.
* Don't include anything else into its chgParam set.
* The plan node only depends on params listed in its allParam set. Don't
* include anything else into its chgParam set.
*/
parmset = bms_intersect(node->plan->allParam, newchg);
/*
* Keep node->chgParam == NULL if there's not actually any members;
* this allows the simplest possible tests in executor node files.
* Keep node->chgParam == NULL if there's not actually any members; this
* allows the simplest possible tests in executor node files.
*/
if (!bms_is_empty(parmset))
node->chgParam = bms_join(node->chgParam, parmset);
@ -1049,8 +1047,8 @@ ShutdownExprContext(ExprContext *econtext)
return;
/*
* Call the callbacks in econtext's per-tuple context. This ensures
* that any memory they might leak will get cleaned up.
* Call the callbacks in econtext's per-tuple context. This ensures that
* any memory they might leak will get cleaned up.
*/
oldcontext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.97 2005/04/10 18:04:20 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.98 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -82,7 +82,7 @@ typedef SQLFunctionCache *SQLFunctionCachePtr;
/* non-export function prototypes */
static execution_state *init_execution_state(List *queryTree_list,
bool readonly_func);
bool readonly_func);
static void init_sql_fcache(FmgrInfo *finfo);
static void postquel_start(execution_state *es, SQLFunctionCachePtr fcache);
static TupleTableSlot *postquel_getnext(execution_state *es);
@ -115,14 +115,14 @@ init_execution_state(List *queryTree_list, bool readonly_func)
IsA(queryTree->utilityStmt, TransactionStmt))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
/* translator: %s is a SQL statement name */
/* translator: %s is a SQL statement name */
errmsg("%s is not allowed in a SQL function",
CreateQueryTag(queryTree))));
if (readonly_func && !QueryIsReadOnly(queryTree))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
/* translator: %s is a SQL statement name */
/* translator: %s is a SQL statement name */
errmsg("%s is not allowed in a non-volatile function",
CreateQueryTag(queryTree))));
@ -178,8 +178,8 @@ init_sql_fcache(FmgrInfo *finfo)
procedureStruct = (Form_pg_proc) GETSTRUCT(procedureTuple);
/*
* get the result type from the procedure tuple, and check for
* polymorphic result type; if so, find out the actual result type.
* get the result type from the procedure tuple, and check for polymorphic
* result type; if so, find out the actual result type.
*/
rettype = procedureStruct->prorettype;
@ -190,7 +190,7 @@ init_sql_fcache(FmgrInfo *finfo)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("could not determine actual result type for function declared to return type %s",
format_type_be(procedureStruct->prorettype))));
format_type_be(procedureStruct->prorettype))));
}
fcache->rettype = rettype;
@ -208,9 +208,9 @@ init_sql_fcache(FmgrInfo *finfo)
typeStruct = (Form_pg_type) GETSTRUCT(typeTuple);
/*
* get the type length and by-value flag from the type tuple; also do
* a preliminary check for returnsTuple (this may prove inaccurate,
* see below).
* get the type length and by-value flag from the type tuple; also do a
* preliminary check for returnsTuple (this may prove inaccurate, see
* below).
*/
fcache->typlen = typeStruct->typlen;
fcache->typbyval = typeStruct->typbyval;
@ -218,8 +218,8 @@ init_sql_fcache(FmgrInfo *finfo)
rettype == RECORDOID);
/*
* Parse and rewrite the queries. We need the argument type info to
* pass to the parser.
* Parse and rewrite the queries. We need the argument type info to pass
* to the parser.
*/
nargs = procedureStruct->pronargs;
haspolyarg = false;
@ -265,17 +265,17 @@ init_sql_fcache(FmgrInfo *finfo)
queryTree_list = pg_parse_and_rewrite(src, argOidVect, nargs);
/*
* If the function has any arguments declared as polymorphic types,
* then it wasn't type-checked at definition time; must do so now.
* If the function has any arguments declared as polymorphic types, then
* it wasn't type-checked at definition time; must do so now.
*
* Also, force a type-check if the declared return type is a rowtype; we
* need to find out whether we are actually returning the whole tuple
* result, or just regurgitating a rowtype expression result. In the
* latter case we clear returnsTuple because we need not act different
* from the scalar result case.
* Also, force a type-check if the declared return type is a rowtype; we need
* to find out whether we are actually returning the whole tuple result,
* or just regurgitating a rowtype expression result. In the latter case
* we clear returnsTuple because we need not act different from the scalar
* result case.
*
* In the returnsTuple case, check_sql_fn_retval will also construct
* a JunkFilter we can use to coerce the returned rowtype to the desired
* In the returnsTuple case, check_sql_fn_retval will also construct a
* JunkFilter we can use to coerce the returned rowtype to the desired
* form.
*/
if (haspolyarg || fcache->returnsTuple)
@ -307,9 +307,9 @@ postquel_start(execution_state *es, SQLFunctionCachePtr fcache)
/*
* In a read-only function, use the surrounding query's snapshot;
* otherwise take a new snapshot for each query. The snapshot should
* include a fresh command ID so that all work to date in this
* transaction is visible. We copy in both cases so that postquel_end
* can unconditionally do FreeSnapshot.
* include a fresh command ID so that all work to date in this transaction
* is visible. We copy in both cases so that postquel_end can
* unconditionally do FreeSnapshot.
*/
if (fcache->readonly_func)
snapshot = CopySnapshot(ActiveSnapshot);
@ -470,8 +470,8 @@ postquel_execute(execution_state *es,
if (TupIsNull(slot))
{
/*
* We fall out here for all cases except where we have obtained
* a row from a function's final SELECT.
* We fall out here for all cases except where we have obtained a row
* from a function's final SELECT.
*/
postquel_end(es);
fcinfo->isnull = true;
@ -479,34 +479,34 @@ postquel_execute(execution_state *es,
}
/*
* If we got a row from a command within the function it has to be
* the final command. All others shouldn't be returning anything.
* If we got a row from a command within the function it has to be the
* final command. All others shouldn't be returning anything.
*/
Assert(LAST_POSTQUEL_COMMAND(es));
/*
* Set up to return the function value. For pass-by-reference
* datatypes, be sure to allocate the result in resultcontext,
* not the current memory context (which has query lifespan).
* Set up to return the function value. For pass-by-reference datatypes,
* be sure to allocate the result in resultcontext, not the current memory
* context (which has query lifespan).
*/
oldcontext = MemoryContextSwitchTo(resultcontext);
if (fcache->returnsTuple)
{
/*
* We are returning the whole tuple, so filter it and apply the
* proper labeling to make it a valid Datum. There are several
* reasons why we do this:
* We are returning the whole tuple, so filter it and apply the proper
* labeling to make it a valid Datum. There are several reasons why
* we do this:
*
* 1. To copy the tuple out of the child execution context and
* into the desired result context.
* 1. To copy the tuple out of the child execution context and into the
* desired result context.
*
* 2. To remove any junk attributes present in the raw subselect
* result. (This is probably not absolutely necessary, but it
* seems like good policy.)
* 2. To remove any junk attributes present in the raw subselect result.
* (This is probably not absolutely necessary, but it seems like good
* policy.)
*
* 3. To insert dummy null columns if the declared result type
* has any attisdropped columns.
* 3. To insert dummy null columns if the declared result type has any
* attisdropped columns.
*/
HeapTuple newtup;
HeapTupleHeader dtup;
@ -517,19 +517,18 @@ postquel_execute(execution_state *es,
newtup = ExecRemoveJunk(fcache->junkFilter, slot);
/*
* Compress out the HeapTuple header data. We assume that
* heap_form_tuple made the tuple with header and body in one
* palloc'd chunk. We want to return a pointer to the chunk
* start so that it will work if someone tries to free it.
* Compress out the HeapTuple header data. We assume that
* heap_form_tuple made the tuple with header and body in one palloc'd
* chunk. We want to return a pointer to the chunk start so that it
* will work if someone tries to free it.
*/
t_len = newtup->t_len;
dtup = (HeapTupleHeader) newtup;
memmove((char *) dtup, (char *) newtup->t_data, t_len);
/*
* Use the declared return type if it's not RECORD; else take
* the type from the computed result, making sure a typmod has
* been assigned.
* Use the declared return type if it's not RECORD; else take the type
* from the computed result, making sure a typmod has been assigned.
*/
if (fcache->rettype != RECORDOID)
{
@ -559,9 +558,8 @@ postquel_execute(execution_state *es,
else
{
/*
* Returning a scalar, which we have to extract from the first
* column of the SELECT result, and then copy into result
* context if needed.
* Returning a scalar, which we have to extract from the first column
* of the SELECT result, and then copy into result context if needed.
*/
value = slot_getattr(slot, 1, &(fcinfo->isnull));
@ -617,8 +615,8 @@ fmgr_sql(PG_FUNCTION_ARGS)
es = fcache->func_state;
/*
* Convert params to appropriate format if starting a fresh execution.
* (If continuing execution, we can re-use prior params.)
* Convert params to appropriate format if starting a fresh execution. (If
* continuing execution, we can re-use prior params.)
*/
if (es && es->status == F_EXEC_START)
postquel_sub_params(fcache, fcinfo);
@ -631,8 +629,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
/*
* Execute each command in the function one after another until we're
* executing the final command and get a result or we run out of
* commands.
* executing the final command and get a result or we run out of commands.
*/
while (es)
{
@ -691,8 +688,8 @@ fmgr_sql(PG_FUNCTION_ARGS)
}
/*
* If we got a result from a command within the function it has to be
* the final command. All others shouldn't be returning anything.
* If we got a result from a command within the function it has to be the
* final command. All others shouldn't be returning anything.
*/
Assert(LAST_POSTQUEL_COMMAND(es));
@ -711,8 +708,8 @@ fmgr_sql(PG_FUNCTION_ARGS)
errmsg("set-valued function called in context that cannot accept a set")));
/*
* Ensure we will get shut down cleanly if the exprcontext is not
* run to completion.
* Ensure we will get shut down cleanly if the exprcontext is not run
* to completion.
*/
if (!fcache->shutdown_reg)
{
@ -754,8 +751,7 @@ sql_exec_error_callback(void *arg)
fn_name = NameStr(functup->proname);
/*
* If there is a syntax error position, convert to internal syntax
* error
* If there is a syntax error position, convert to internal syntax error
*/
syntaxerrposition = geterrposition();
if (syntaxerrposition > 0)
@ -776,11 +772,11 @@ sql_exec_error_callback(void *arg)
}
/*
* Try to determine where in the function we failed. If there is a
* query with non-null QueryDesc, finger it. (We check this rather
* than looking for F_EXEC_RUN state, so that errors during
* ExecutorStart or ExecutorEnd are blamed on the appropriate query;
* see postquel_start and postquel_end.)
* Try to determine where in the function we failed. If there is a query
* with non-null QueryDesc, finger it. (We check this rather than looking
* for F_EXEC_RUN state, so that errors during ExecutorStart or
* ExecutorEnd are blamed on the appropriate query; see postquel_start and
* postquel_end.)
*/
if (fcache)
{
@ -888,9 +884,9 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
if (rettype != VOIDOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
errdetail("Function's final statement must be a SELECT.")));
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
errdetail("Function's final statement must be a SELECT.")));
return false;
}
@ -901,17 +897,16 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
tlist = parse->targetList;
/*
* The last query must be a SELECT if and only if return type isn't
* VOID.
* The last query must be a SELECT if and only if return type isn't VOID.
*/
if (rettype == VOIDOID)
{
if (cmd == CMD_SELECT)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
errdetail("Function's final statement must not be a SELECT.")));
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
errdetail("Function's final statement must not be a SELECT.")));
return false;
}
@ -919,9 +914,9 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
if (cmd != CMD_SELECT)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
errdetail("Function's final statement must be a SELECT.")));
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
errdetail("Function's final statement must be a SELECT.")));
/*
* Count the non-junk entries in the result targetlist.
@ -934,22 +929,22 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
{
/*
* For base-type returns, the target list should have exactly one
* entry, and its type should agree with what the user declared.
* (As of Postgres 7.2, we accept binary-compatible types too.)
* entry, and its type should agree with what the user declared. (As
* of Postgres 7.2, we accept binary-compatible types too.)
*/
if (tlistlen != 1)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
errdetail("Final SELECT must return exactly one column.")));
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
errdetail("Final SELECT must return exactly one column.")));
restype = exprType((Node *) ((TargetEntry *) linitial(tlist))->expr);
if (!IsBinaryCoercible(restype, rettype))
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
errdetail("Actual return type is %s.",
format_type_be(restype))));
}
@ -957,16 +952,16 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
{
/* Returns a rowtype */
TupleDesc tupdesc;
int tupnatts; /* physical number of columns in tuple */
int tuplogcols; /* # of nondeleted columns in tuple */
int colindex; /* physical column index */
int tupnatts; /* physical number of columns in tuple */
int tuplogcols; /* # of nondeleted columns in tuple */
int colindex; /* physical column index */
/*
* If the target list is of length 1, and the type of the varnode
* in the target list matches the declared return type, this is
* okay. This can happen, for example, where the body of the
* function is 'SELECT func2()', where func2 has the same return
* type as the function that's calling it.
* If the target list is of length 1, and the type of the varnode in
* the target list matches the declared return type, this is okay.
* This can happen, for example, where the body of the function is
* 'SELECT func2()', where func2 has the same return type as the
* function that's calling it.
*/
if (tlistlen == 1)
{
@ -979,9 +974,8 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
if (get_func_result_type(func_id, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
{
/*
* Assume we are returning the whole tuple.
* Crosschecking against what the caller expects will happen at
* runtime.
* Assume we are returning the whole tuple. Crosschecking against
* what the caller expects will happen at runtime.
*/
if (junkFilter)
*junkFilter = ExecInitJunkFilter(tlist, false, NULL);
@ -990,9 +984,9 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
Assert(tupdesc);
/*
* Verify that the targetlist matches the return tuple type.
* We scan the non-deleted attributes to ensure that they match the
* datatypes of the non-resjunk columns.
* Verify that the targetlist matches the return tuple type. We scan
* the non-deleted attributes to ensure that they match the datatypes
* of the non-resjunk columns.
*/
tupnatts = tupdesc->natts;
tuplogcols = 0; /* we'll count nondeleted cols as we go */
@ -1016,7 +1010,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
errdetail("Final SELECT returns too many columns.")));
errdetail("Final SELECT returns too many columns.")));
attr = tupdesc->attrs[colindex - 1];
} while (attr->attisdropped);
tuplogcols++;
@ -1046,15 +1040,15 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
if (tlistlen != tuplogcols)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
errdetail("Final SELECT returns too few columns.")));
/* Set up junk filter if needed */
if (junkFilter)
*junkFilter = ExecInitJunkFilterConversion(tlist,
CreateTupleDescCopy(tupdesc),
NULL);
CreateTupleDescCopy(tupdesc),
NULL);
/* Report that we are returning entire tuple result */
return true;
@ -1070,8 +1064,8 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type %s is not supported for SQL functions",
format_type_be(rettype))));
errmsg("return type %s is not supported for SQL functions",
format_type_be(rettype))));
return false;
}

View File

@ -7,7 +7,7 @@
* Copyright (c) 2001-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/instrument.c,v 1.12 2005/04/16 20:07:35 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/instrument.c,v 1.13 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -43,7 +43,7 @@ InstrStartNode(Instrumentation *instr)
void
InstrStopNode(Instrumentation *instr, bool returnedTuple)
{
instr_time endtime;
instr_time endtime;
/* count the returned tuples */
if (returnedTuple)
@ -72,7 +72,7 @@ InstrStopNode(Instrumentation *instr, bool returnedTuple)
instr->counter.tv_usec -= 1000000;
instr->counter.tv_sec++;
}
#else /* WIN32 */
#else /* WIN32 */
instr->counter.QuadPart += (endtime.QuadPart - instr->starttime.QuadPart);
#endif

View File

@ -53,7 +53,7 @@
* pass-by-ref inputs, but in the aggregate case we know the left input is
* either the initial transition value or a previous function result, and
* in either case its value need not be preserved. See int8inc() for an
* example. Notice that advance_transition_function() is coded to avoid a
* example. Notice that advance_transition_function() is coded to avoid a
* data copy step when the previous transition value pointer is returned.
*
*
@ -61,7 +61,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.134 2005/06/28 05:08:55 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.135 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -109,8 +109,8 @@ typedef struct AggStatePerAggData
/*
* fmgr lookup data for transfer functions --- only valid when
* corresponding oid is not InvalidOid. Note in particular that
* fn_strict flags are kept here.
* corresponding oid is not InvalidOid. Note in particular that fn_strict
* flags are kept here.
*/
FmgrInfo transfn;
FmgrInfo finalfn;
@ -124,8 +124,8 @@ typedef struct AggStatePerAggData
Oid sortOperator;
/*
* fmgr lookup data for input type's equality operator --- only
* set/used when aggregate has DISTINCT flag.
* fmgr lookup data for input type's equality operator --- only set/used
* when aggregate has DISTINCT flag.
*/
FmgrInfo equalfn;
@ -147,14 +147,14 @@ typedef struct AggStatePerAggData
transtypeByVal;
/*
* These values are working state that is initialized at the start of
* an input tuple group and updated for each input tuple.
* These values are working state that is initialized at the start of an
* input tuple group and updated for each input tuple.
*
* For a simple (non DISTINCT) aggregate, we just feed the input values
* straight to the transition function. If it's DISTINCT, we pass the
* input values into a Tuplesort object; then at completion of the
* input tuple group, we scan the sorted values, eliminate duplicates,
* and run the transition function on the rest.
* input values into a Tuplesort object; then at completion of the input
* tuple group, we scan the sorted values, eliminate duplicates, and run
* the transition function on the rest.
*/
Tuplesortstate *sortstate; /* sort object, if a DISTINCT agg */
@ -184,12 +184,11 @@ typedef struct AggStatePerGroupData
bool noTransValue; /* true if transValue not set yet */
/*
* Note: noTransValue initially has the same value as
* transValueIsNull, and if true both are cleared to false at the same
* time. They are not the same though: if transfn later returns a
* NULL, we want to keep that NULL and not auto-replace it with a
* later input value. Only the first non-NULL input will be
* auto-substituted.
* Note: noTransValue initially has the same value as transValueIsNull,
* and if true both are cleared to false at the same time. They are not
* the same though: if transfn later returns a NULL, we want to keep that
* NULL and not auto-replace it with a later input value. Only the first
* non-NULL input will be auto-substituted.
*/
} AggStatePerGroupData;
@ -270,11 +269,11 @@ initialize_aggregates(AggState *aggstate,
}
/*
* If we are reinitializing after a group boundary, we have to
* free any prior transValue to avoid memory leakage. We must
* check not only the isnull flag but whether the pointer is NULL;
* since pergroupstate is initialized with palloc0, the initial
* condition has isnull = 0 and null pointer.
* If we are reinitializing after a group boundary, we have to free
* any prior transValue to avoid memory leakage. We must check not
* only the isnull flag but whether the pointer is NULL; since
* pergroupstate is initialized with palloc0, the initial condition
* has isnull = 0 and null pointer.
*/
if (!peraggstate->transtypeByVal &&
!pergroupstate->transValueIsNull &&
@ -284,8 +283,8 @@ initialize_aggregates(AggState *aggstate,
/*
* (Re)set transValue to the initial value.
*
* Note that when the initial value is pass-by-ref, we must copy it
* (into the aggcontext) since we will pfree the transValue later.
* Note that when the initial value is pass-by-ref, we must copy it (into
* the aggcontext) since we will pfree the transValue later.
*/
if (peraggstate->initValueIsNull)
pergroupstate->transValue = peraggstate->initValue;
@ -295,18 +294,18 @@ initialize_aggregates(AggState *aggstate,
oldContext = MemoryContextSwitchTo(aggstate->aggcontext);
pergroupstate->transValue = datumCopy(peraggstate->initValue,
peraggstate->transtypeByVal,
peraggstate->transtypeLen);
peraggstate->transtypeByVal,
peraggstate->transtypeLen);
MemoryContextSwitchTo(oldContext);
}
pergroupstate->transValueIsNull = peraggstate->initValueIsNull;
/*
* If the initial value for the transition state doesn't exist in
* the pg_aggregate table then we will let the first non-NULL
* value returned from the outer procNode become the initial
* value. (This is useful for aggregates like max() and min().)
* The noTransValue flag signals that we still need to do this.
* If the initial value for the transition state doesn't exist in the
* pg_aggregate table then we will let the first non-NULL value
* returned from the outer procNode become the initial value. (This is
* useful for aggregates like max() and min().) The noTransValue flag
* signals that we still need to do this.
*/
pergroupstate->noTransValue = peraggstate->initValueIsNull;
}
@ -337,20 +336,18 @@ advance_transition_function(AggState *aggstate,
if (pergroupstate->noTransValue)
{
/*
* transValue has not been initialized. This is the first
* non-NULL input value. We use it as the initial value for
* transValue. (We already checked that the agg's input type
* is binary-compatible with its transtype, so straight copy
* here is OK.)
* transValue has not been initialized. This is the first non-NULL
* input value. We use it as the initial value for transValue. (We
* already checked that the agg's input type is binary-compatible
* with its transtype, so straight copy here is OK.)
*
* We must copy the datum into aggcontext if it is pass-by-ref.
* We do not need to pfree the old transValue, since it's
* NULL.
* We must copy the datum into aggcontext if it is pass-by-ref. We do
* not need to pfree the old transValue, since it's NULL.
*/
oldContext = MemoryContextSwitchTo(aggstate->aggcontext);
pergroupstate->transValue = datumCopy(newVal,
peraggstate->transtypeByVal,
peraggstate->transtypeLen);
peraggstate->transtypeByVal,
peraggstate->transtypeLen);
pergroupstate->transValueIsNull = false;
pergroupstate->noTransValue = false;
MemoryContextSwitchTo(oldContext);
@ -360,10 +357,9 @@ advance_transition_function(AggState *aggstate,
{
/*
* Don't call a strict function with NULL inputs. Note it is
* possible to get here despite the above tests, if the
* transfn is strict *and* returned a NULL on a prior cycle.
* If that happens we will propagate the NULL all the way to
* the end.
* possible to get here despite the above tests, if the transfn is
* strict *and* returned a NULL on a prior cycle. If that happens
* we will propagate the NULL all the way to the end.
*/
return;
}
@ -385,12 +381,12 @@ advance_transition_function(AggState *aggstate,
newVal = FunctionCallInvoke(&fcinfo);
/*
* If pass-by-ref datatype, must copy the new value into aggcontext
* and pfree the prior transValue. But if transfn returned a pointer
* to its first input, we don't need to do anything.
* If pass-by-ref datatype, must copy the new value into aggcontext and
* pfree the prior transValue. But if transfn returned a pointer to its
* first input, we don't need to do anything.
*/
if (!peraggstate->transtypeByVal &&
DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
{
if (!fcinfo.isnull)
{
@ -473,24 +469,24 @@ process_sorted_aggregate(AggState *aggstate,
tuplesort_performsort(peraggstate->sortstate);
/*
* Note: if input type is pass-by-ref, the datums returned by the sort
* are freshly palloc'd in the per-query context, so we must be
* careful to pfree them when they are no longer needed.
* Note: if input type is pass-by-ref, the datums returned by the sort are
* freshly palloc'd in the per-query context, so we must be careful to
* pfree them when they are no longer needed.
*/
while (tuplesort_getdatum(peraggstate->sortstate, true,
&newVal, &isNull))
{
/*
* DISTINCT always suppresses nulls, per SQL spec, regardless of
* the transition function's strictness.
* DISTINCT always suppresses nulls, per SQL spec, regardless of the
* transition function's strictness.
*/
if (isNull)
continue;
/*
* Clear and select the working context for evaluation of the
* equality function and transition function.
* Clear and select the working context for evaluation of the equality
* function and transition function.
*/
MemoryContextReset(workcontext);
oldContext = MemoryContextSwitchTo(workcontext);
@ -726,8 +722,8 @@ agg_retrieve_direct(AggState *aggstate)
while (!aggstate->agg_done)
{
/*
* If we don't already have the first tuple of the new group,
* fetch it from the outer plan.
* If we don't already have the first tuple of the new group, fetch it
* from the outer plan.
*/
if (aggstate->grp_firstTuple == NULL)
{
@ -735,8 +731,8 @@ agg_retrieve_direct(AggState *aggstate)
if (!TupIsNull(outerslot))
{
/*
* Make a copy of the first input tuple; we will use this
* for comparisons (in group mode) and for projection.
* Make a copy of the first input tuple; we will use this for
* comparisons (in group mode) and for projection.
*/
aggstate->grp_firstTuple = ExecCopySlotTuple(outerslot);
}
@ -764,8 +760,8 @@ agg_retrieve_direct(AggState *aggstate)
{
/*
* Store the copied first input tuple in the tuple table slot
* reserved for it. The tuple will be deleted when it is
* cleared from the slot.
* reserved for it. The tuple will be deleted when it is cleared
* from the slot.
*/
ExecStoreTuple(aggstate->grp_firstTuple,
firstSlot,
@ -807,7 +803,7 @@ agg_retrieve_direct(AggState *aggstate)
outerslot,
node->numCols, node->grpColIdx,
aggstate->eqfunctions,
tmpcontext->ecxt_per_tuple_memory))
tmpcontext->ecxt_per_tuple_memory))
{
/*
* Save the first input tuple of the next group.
@ -838,17 +834,16 @@ agg_retrieve_direct(AggState *aggstate)
/*
* If we have no first tuple (ie, the outerPlan didn't return
* anything), create a dummy all-nulls input tuple for use by
* ExecQual/ExecProject. 99.44% of the time this is a waste of
* cycles, because ordinarily the projected output tuple's
* targetlist cannot contain any direct (non-aggregated)
* references to input columns, so the dummy tuple will not be
* referenced. However there are special cases where this isn't so
* --- in particular an UPDATE involving an aggregate will have a
* targetlist reference to ctid. We need to return a null for
* ctid in that situation, not coredump.
* ExecQual/ExecProject. 99.44% of the time this is a waste of cycles,
* because ordinarily the projected output tuple's targetlist cannot
* contain any direct (non-aggregated) references to input columns, so
* the dummy tuple will not be referenced. However there are special
* cases where this isn't so --- in particular an UPDATE involving an
* aggregate will have a targetlist reference to ctid. We need to
* return a null for ctid in that situation, not coredump.
*
* The values returned for the aggregates will be the initial values
* of the transition functions.
* The values returned for the aggregates will be the initial values of
* the transition functions.
*/
if (TupIsNull(firstSlot))
{
@ -866,15 +861,15 @@ agg_retrieve_direct(AggState *aggstate)
econtext->ecxt_scantuple = firstSlot;
/*
* Check the qual (HAVING clause); if the group does not match,
* ignore it and loop back to try to process another group.
* Check the qual (HAVING clause); if the group does not match, ignore
* it and loop back to try to process another group.
*/
if (ExecQual(aggstate->ss.ps.qual, econtext, false))
{
/*
* Form and return a projection tuple using the aggregate
* results and the representative input tuple. Note we do not
* support aggregates returning sets ...
* Form and return a projection tuple using the aggregate results
* and the representative input tuple. Note we do not support
* aggregates returning sets ...
*/
return ExecProject(projInfo, NULL);
}
@ -903,8 +898,8 @@ agg_fill_hash_table(AggState *aggstate)
tmpcontext = aggstate->tmpcontext;
/*
* Process each outer-plan tuple, and then fetch the next one, until
* we exhaust the outer plan.
* Process each outer-plan tuple, and then fetch the next one, until we
* exhaust the outer plan.
*/
for (;;)
{
@ -979,8 +974,8 @@ agg_retrieve_hash_table(AggState *aggstate)
ResetExprContext(econtext);
/*
* Store the copied first input tuple in the tuple table slot
* reserved for it, so that it can be used in ExecProject.
* Store the copied first input tuple in the tuple table slot reserved
* for it, so that it can be used in ExecProject.
*/
ExecStoreTuple(entry->shared.firstTuple,
firstSlot,
@ -1010,15 +1005,15 @@ agg_retrieve_hash_table(AggState *aggstate)
econtext->ecxt_scantuple = firstSlot;
/*
* Check the qual (HAVING clause); if the group does not match,
* ignore it and loop back to try to process another group.
* Check the qual (HAVING clause); if the group does not match, ignore
* it and loop back to try to process another group.
*/
if (ExecQual(aggstate->ss.ps.qual, econtext, false))
{
/*
* Form and return a projection tuple using the aggregate
* results and the representative input tuple. Note we do not
* support aggregates returning sets ...
* Form and return a projection tuple using the aggregate results
* and the representative input tuple. Note we do not support
* aggregates returning sets ...
*/
return ExecProject(projInfo, NULL);
}
@ -1065,8 +1060,8 @@ ExecInitAgg(Agg *node, EState *estate)
/*
* Create expression contexts. We need two, one for per-input-tuple
* processing and one for per-output-tuple processing. We cheat a
* little by using ExecAssignExprContext() to build both.
* processing and one for per-output-tuple processing. We cheat a little
* by using ExecAssignExprContext() to build both.
*/
ExecAssignExprContext(estate, &aggstate->ss.ps);
aggstate->tmpcontext = aggstate->ss.ps.ps_ExprContext;
@ -1074,10 +1069,10 @@ ExecInitAgg(Agg *node, EState *estate)
/*
* We also need a long-lived memory context for holding hashtable data
* structures and transition values. NOTE: the details of what is
* stored in aggcontext and what is stored in the regular per-query
* memory context are driven by a simple decision: we want to reset
* the aggcontext in ExecReScanAgg to recover no-longer-wanted space.
* structures and transition values. NOTE: the details of what is stored
* in aggcontext and what is stored in the regular per-query memory
* context are driven by a simple decision: we want to reset the
* aggcontext in ExecReScanAgg to recover no-longer-wanted space.
*/
aggstate->aggcontext =
AllocSetContextCreate(CurrentMemoryContext,
@ -1098,10 +1093,10 @@ ExecInitAgg(Agg *node, EState *estate)
* initialize child expressions
*
* Note: ExecInitExpr finds Aggrefs for us, and also checks that no aggs
* contain other agg calls in their arguments. This would make no
* sense under SQL semantics anyway (and it's forbidden by the spec).
* Because that is true, we don't need to worry about evaluating the
* aggs in any particular order.
* contain other agg calls in their arguments. This would make no sense
* under SQL semantics anyway (and it's forbidden by the spec). Because
* that is true, we don't need to worry about evaluating the aggs in any
* particular order.
*/
aggstate->ss.ps.targetlist = (List *)
ExecInitExpr((Expr *) node->plan.targetlist,
@ -1135,20 +1130,19 @@ ExecInitAgg(Agg *node, EState *estate)
if (numaggs <= 0)
{
/*
* This is not an error condition: we might be using the Agg node
* just to do hash-based grouping. Even in the regular case,
* constant-expression simplification could optimize away all of
* the Aggrefs in the targetlist and qual. So keep going, but
* force local copy of numaggs positive so that palloc()s below
* don't choke.
* This is not an error condition: we might be using the Agg node just
* to do hash-based grouping. Even in the regular case,
* constant-expression simplification could optimize away all of the
* Aggrefs in the targetlist and qual. So keep going, but force local
* copy of numaggs positive so that palloc()s below don't choke.
*/
numaggs = 1;
}
/*
* If we are grouping, precompute fmgr lookup data for inner loop. We
* need both equality and hashing functions to do it by hashing, but
* only equality if not hashing.
* If we are grouping, precompute fmgr lookup data for inner loop. We need
* both equality and hashing functions to do it by hashing, but only
* equality if not hashing.
*/
if (node->numCols > 0)
{
@ -1166,8 +1160,8 @@ ExecInitAgg(Agg *node, EState *estate)
}
/*
* Set up aggregate-result storage in the output expr context, and
* also allocate my private per-agg working storage
* Set up aggregate-result storage in the output expr context, and also
* allocate my private per-agg working storage
*/
econtext = aggstate->ss.ps.ps_ExprContext;
econtext->ecxt_aggvalues = (Datum *) palloc0(sizeof(Datum) * numaggs);
@ -1192,10 +1186,10 @@ ExecInitAgg(Agg *node, EState *estate)
/*
* Perform lookups of aggregate function info, and initialize the
* unchanging fields of the per-agg data. We also detect duplicate
* aggregates (for example, "SELECT sum(x) ... HAVING sum(x) > 0").
* When duplicates are detected, we only make an AggStatePerAgg struct
* for the first one. The clones are simply pointed at the same
* result entry by giving them duplicate aggno values.
* aggregates (for example, "SELECT sum(x) ... HAVING sum(x) > 0"). When
* duplicates are detected, we only make an AggStatePerAgg struct for the
* first one. The clones are simply pointed at the same result entry by
* giving them duplicate aggno values.
*/
aggno = -1;
foreach(l, aggstate->aggs)
@ -1243,9 +1237,9 @@ ExecInitAgg(Agg *node, EState *estate)
peraggstate->aggref = aggref;
/*
* Get actual datatype of the input. We need this because it may
* be different from the agg's declared input type, when the agg
* accepts ANY (eg, COUNT(*)) or ANYARRAY or ANYELEMENT.
* Get actual datatype of the input. We need this because it may be
* different from the agg's declared input type, when the agg accepts
* ANY (eg, COUNT(*)) or ANYARRAY or ANYELEMENT.
*/
inputType = exprType((Node *) aggref->target);
@ -1270,7 +1264,7 @@ ExecInitAgg(Agg *node, EState *estate)
/* Check that aggregate owner has permission to call component fns */
{
HeapTuple procTuple;
Oid aggOwner;
Oid aggOwner;
procTuple = SearchSysCache(PROCOID,
ObjectIdGetDatum(aggref->aggfnoid),
@ -1339,8 +1333,8 @@ ExecInitAgg(Agg *node, EState *estate)
&peraggstate->transtypeByVal);
/*
* initval is potentially null, so don't try to access it as a
* struct field. Must do it the hard way with SysCacheGetAttr.
* initval is potentially null, so don't try to access it as a struct
* field. Must do it the hard way with SysCacheGetAttr.
*/
textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple,
Anum_pg_aggregate_agginitval,
@ -1353,11 +1347,11 @@ ExecInitAgg(Agg *node, EState *estate)
aggtranstype);
/*
* If the transfn is strict and the initval is NULL, make sure
* input type and transtype are the same (or at least binary-
* compatible), so that it's OK to use the first input value as
* the initial transValue. This should have been checked at agg
* definition time, but just in case...
* If the transfn is strict and the initval is NULL, make sure input
* type and transtype are the same (or at least binary- compatible),
* so that it's OK to use the first input value as the initial
* transValue. This should have been checked at agg definition time,
* but just in case...
*/
if (peraggstate->transfn.fn_strict && peraggstate->initValueIsNull)
{
@ -1463,18 +1457,18 @@ ExecReScanAgg(AggState *node, ExprContext *exprCtxt)
if (((Agg *) node->ss.ps.plan)->aggstrategy == AGG_HASHED)
{
/*
* In the hashed case, if we haven't yet built the hash table then
* we can just return; nothing done yet, so nothing to undo. If
* subnode's chgParam is not NULL then it will be re-scanned by
* ExecProcNode, else no reason to re-scan it at all.
* In the hashed case, if we haven't yet built the hash table then we
* can just return; nothing done yet, so nothing to undo. If subnode's
* chgParam is not NULL then it will be re-scanned by ExecProcNode,
* else no reason to re-scan it at all.
*/
if (!node->table_filled)
return;
/*
* If we do have the hash table and the subplan does not have any
* parameter changes, then we can just rescan the existing hash
* table; no need to build it again.
* parameter changes, then we can just rescan the existing hash table;
* no need to build it again.
*/
if (((PlanState *) node)->lefttree->chgParam == NULL)
{
@ -1516,8 +1510,7 @@ ExecReScanAgg(AggState *node, ExprContext *exprCtxt)
else
{
/*
* Reset the per-group state (in particular, mark transvalues
* null)
* Reset the per-group state (in particular, mark transvalues null)
*/
MemSet(node->pergroup, 0,
sizeof(AggStatePerGroupData) * node->numaggs);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeAppend.c,v 1.64 2005/05/22 22:30:19 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeAppend.c,v 1.65 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -88,10 +88,9 @@ exec_append_initialize_next(AppendState *appendstate)
if (whichplan < appendstate->as_firstplan)
{
/*
* if scanning in reverse, we start at the last scan in the list
* and then proceed back to the first.. in any case we inform
* ExecAppend that we are at the end of the line by returning
* FALSE
* if scanning in reverse, we start at the last scan in the list and
* then proceed back to the first.. in any case we inform ExecAppend
* that we are at the end of the line by returning FALSE
*/
appendstate->as_whichplan = appendstate->as_firstplan;
return FALSE;
@ -99,8 +98,7 @@ exec_append_initialize_next(AppendState *appendstate)
else if (whichplan > appendstate->as_lastplan)
{
/*
* as above, end the scan if we go beyond the last scan in our
* list..
* as above, end the scan if we go beyond the last scan in our list..
*/
appendstate->as_whichplan = appendstate->as_lastplan;
return FALSE;
@ -110,8 +108,8 @@ exec_append_initialize_next(AppendState *appendstate)
/*
* initialize the scan
*
* If we are controlling the target relation, select the proper
* active ResultRelInfo and junk filter for this target.
* If we are controlling the target relation, select the proper active
* ResultRelInfo and junk filter for this target.
*/
if (((Append *) appendstate->ps.plan)->isTarget)
{
@ -168,9 +166,8 @@ ExecInitAppend(Append *node, EState *estate)
appendstate->as_nplans = nplans;
/*
* Do we want to scan just one subplan? (Special case for
* EvalPlanQual) XXX pretty dirty way of determining that this case
* applies ...
* Do we want to scan just one subplan? (Special case for EvalPlanQual)
* XXX pretty dirty way of determining that this case applies ...
*/
if (node->isTarget && estate->es_evTuple != NULL)
{
@ -199,8 +196,8 @@ ExecInitAppend(Append *node, EState *estate)
#define APPEND_NSLOTS 1
/*
* append nodes still have Result slots, which hold pointers to
* tuples, so we have to initialize them.
* append nodes still have Result slots, which hold pointers to tuples, so
* we have to initialize them.
*/
ExecInitResultTupleSlot(estate, &appendstate->ps);
@ -220,10 +217,10 @@ ExecInitAppend(Append *node, EState *estate)
}
/*
* Initialize tuple type. (Note: in an inherited UPDATE situation,
* the tuple type computed here corresponds to the parent table, which
* is really a lie since tuples returned from child subplans will not
* all look the same.)
* Initialize tuple type. (Note: in an inherited UPDATE situation, the
* tuple type computed here corresponds to the parent table, which is
* really a lie since tuples returned from child subplans will not all
* look the same.)
*/
ExecAssignResultTypeFromTL(&appendstate->ps);
appendstate->ps.ps_ProjInfo = NULL;
@ -275,19 +272,19 @@ ExecAppend(AppendState *node)
if (!TupIsNull(result))
{
/*
* If the subplan gave us something then return it as-is.
* We do NOT make use of the result slot that was set up in
* ExecInitAppend, first because there's no reason to and
* second because it may have the wrong tuple descriptor in
* If the subplan gave us something then return it as-is. We do
* NOT make use of the result slot that was set up in
* ExecInitAppend, first because there's no reason to and second
* because it may have the wrong tuple descriptor in
* inherited-UPDATE cases.
*/
return result;
}
/*
* Go on to the "next" subplan in the appropriate direction.
* If no more subplans, return the empty slot set up for us
* by ExecInitAppend.
* Go on to the "next" subplan in the appropriate direction. If no
* more subplans, return the empty slot set up for us by
* ExecInitAppend.
*/
if (ScanDirectionIsForward(node->ps.state->es_direction))
node->as_whichplan++;
@ -348,8 +345,8 @@ ExecReScanAppend(AppendState *node, ExprContext *exprCtxt)
UpdateChangedParamSet(subnode, node->ps.chgParam);
/*
* if chgParam of subnode is not null then plan will be re-scanned
* by first ExecProcNode.
* if chgParam of subnode is not null then plan will be re-scanned by
* first ExecProcNode.
*/
if (subnode->chgParam == NULL)
{

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeBitmapAnd.c,v 1.3 2005/08/28 22:47:20 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeBitmapAnd.c,v 1.4 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -16,7 +16,7 @@
* ExecInitBitmapAnd - initialize the BitmapAnd node
* MultiExecBitmapAnd - retrieve the result bitmap from the node
* ExecEndBitmapAnd - shut down the BitmapAnd node
* ExecReScanBitmapAnd - rescan the BitmapAnd node
* ExecReScanBitmapAnd - rescan the BitmapAnd node
*
* NOTES
* BitmapAnd nodes don't make use of their left and right
@ -137,7 +137,7 @@ MultiExecBitmapAnd(BitmapAndState *node)
elog(ERROR, "unrecognized result from subplan");
if (result == NULL)
result = subresult; /* first subplan */
result = subresult; /* first subplan */
else
{
tbm_intersect(result, subresult);
@ -145,11 +145,11 @@ MultiExecBitmapAnd(BitmapAndState *node)
}
/*
* If at any stage we have a completely empty bitmap, we can fall
* out without evaluating the remaining subplans, since ANDing them
* can no longer change the result. (Note: the fact that indxpath.c
* orders the subplans by selectivity should make this case more
* likely to occur.)
* If at any stage we have a completely empty bitmap, we can fall out
* without evaluating the remaining subplans, since ANDing them can no
* longer change the result. (Note: the fact that indxpath.c orders
* the subplans by selectivity should make this case more likely to
* occur.)
*/
if (tbm_is_empty(result))
break;
@ -160,7 +160,7 @@ MultiExecBitmapAnd(BitmapAndState *node)
/* must provide our own instrumentation support */
if (node->ps.instrument)
InstrStopNodeMulti(node->ps.instrument, 0 /* XXX */);
InstrStopNodeMulti(node->ps.instrument, 0 /* XXX */ );
return (Node *) result;
}

View File

@ -5,7 +5,7 @@
*
* NOTE: it is critical that this plan type only be used with MVCC-compliant
* snapshots (ie, regular snapshots, not SnapshotNow or one of the other
* special snapshots). The reason is that since index and heap scans are
* special snapshots). The reason is that since index and heap scans are
* decoupled, there can be no assurance that the index tuple prompting a
* visit to a particular heap TID still exists when the visit is made.
* Therefore the tuple might not exist anymore either (which is OK because
@ -21,7 +21,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.3 2005/10/06 02:29:16 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.4 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -76,11 +76,11 @@ BitmapHeapNext(BitmapHeapScanState *node)
tbmres = node->tbmres;
/*
* Clear any reference to the previously returned tuple. The idea
* here is to not have the tuple slot be the last holder of a pin on
* that tuple's buffer; if it is, we'll need a separate visit to the
* bufmgr to release the buffer. By clearing here, we get to have the
* release done by ReleaseAndReadBuffer, below.
* Clear any reference to the previously returned tuple. The idea here is
* to not have the tuple slot be the last holder of a pin on that tuple's
* buffer; if it is, we'll need a separate visit to the bufmgr to release
* the buffer. By clearing here, we get to have the release done by
* ReleaseAndReadBuffer, below.
*/
ExecClearTuple(slot);
@ -105,7 +105,7 @@ BitmapHeapNext(BitmapHeapScanState *node)
ResetExprContext(econtext);
if (!ExecQual(node->bitmapqualorig, econtext, false))
ExecClearTuple(slot); /* would not be returned by scan */
ExecClearTuple(slot); /* would not be returned by scan */
/* Flag for the next call that no more tuples */
estate->es_evTupleNull[scanrelid - 1] = true;
@ -114,8 +114,8 @@ BitmapHeapNext(BitmapHeapScanState *node)
}
/*
* If we haven't yet performed the underlying index scan, do it,
* and prepare the bitmap to be iterated over.
* If we haven't yet performed the underlying index scan, do it, and
* prepare the bitmap to be iterated over.
*/
if (tbm == NULL)
{
@ -145,10 +145,10 @@ BitmapHeapNext(BitmapHeapScanState *node)
}
/*
* Ignore any claimed entries past what we think is the end of
* the relation. (This is probably not necessary given that we
* got AccessShareLock before performing any of the indexscans,
* but let's be safe.)
* Ignore any claimed entries past what we think is the end of the
* relation. (This is probably not necessary given that we got
* AccessShareLock before performing any of the indexscans, but
* let's be safe.)
*/
if (tbmres->blockno >= scandesc->rs_nblocks)
{
@ -157,19 +157,18 @@ BitmapHeapNext(BitmapHeapScanState *node)
}
/*
* Acquire pin on the current heap page. We'll hold the pin
* until done looking at the page. We trade in any pin we
* held before.
* Acquire pin on the current heap page. We'll hold the pin until
* done looking at the page. We trade in any pin we held before.
*/
scandesc->rs_cbuf = ReleaseAndReadBuffer(scandesc->rs_cbuf,
scandesc->rs_rd,
tbmres->blockno);
/*
* Determine how many entries we need to look at on this page.
* If the bitmap is lossy then we need to look at each physical
* item pointer; otherwise we just look through the offsets
* listed in tbmres.
* Determine how many entries we need to look at on this page. If
* the bitmap is lossy then we need to look at each physical item
* pointer; otherwise we just look through the offsets listed in
* tbmres.
*/
if (tbmres->ntuples >= 0)
{
@ -180,7 +179,7 @@ BitmapHeapNext(BitmapHeapScanState *node)
else
{
/* lossy case */
Page dp;
Page dp;
LockBuffer(scandesc->rs_cbuf, BUFFER_LOCK_SHARE);
dp = (Page) BufferGetPage(scandesc->rs_cbuf);
@ -230,8 +229,8 @@ BitmapHeapNext(BitmapHeapScanState *node)
ItemPointerSet(&scandesc->rs_ctup.t_self, tbmres->blockno, targoffset);
/*
* Fetch the heap tuple and see if it matches the snapshot.
* We use heap_release_fetch to avoid useless bufmgr traffic.
* Fetch the heap tuple and see if it matches the snapshot. We use
* heap_release_fetch to avoid useless bufmgr traffic.
*/
if (heap_release_fetch(scandesc->rs_rd,
scandesc->rs_snapshot,
@ -241,8 +240,8 @@ BitmapHeapNext(BitmapHeapScanState *node)
&scandesc->rs_pgstat_info))
{
/*
* Set up the result slot to point to this tuple.
* Note that the slot acquires a pin on the buffer.
* Set up the result slot to point to this tuple. Note that the
* slot acquires a pin on the buffer.
*/
ExecStoreTuple(&scandesc->rs_ctup,
slot,
@ -338,8 +337,8 @@ ExecBitmapHeapReScan(BitmapHeapScanState *node, ExprContext *exprCtxt)
node->tbmres = NULL;
/*
* Always rescan the input immediately, to ensure we can pass down
* any outer tuple that might be used in index quals.
* Always rescan the input immediately, to ensure we can pass down any
* outer tuple that might be used in index quals.
*/
ExecReScan(outerPlanState(node), exprCtxt);
}
@ -391,9 +390,9 @@ ExecEndBitmapHeapScan(BitmapHeapScanState *node)
* close the heap relation.
*
* Currently, we do not release the AccessShareLock acquired by
* ExecInitBitmapHeapScan. This lock should be held till end of
* transaction. (There is a faction that considers this too much
* locking, however.)
* ExecInitBitmapHeapScan. This lock should be held till end of
* transaction. (There is a faction that considers this too much locking,
* however.)
*/
heap_close(relation, NoLock);
}
@ -470,9 +469,9 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate)
scanstate->ss.ss_currentRelation = currentRelation;
/*
* Even though we aren't going to do a conventional seqscan, it is
* useful to create a HeapScanDesc --- this checks the relation size
* and sets up statistical infrastructure for us.
* Even though we aren't going to do a conventional seqscan, it is useful
* to create a HeapScanDesc --- this checks the relation size and sets up
* statistical infrastructure for us.
*/
scanstate->ss.ss_currentScanDesc = heap_beginscan(currentRelation,
estate->es_snapshot,
@ -482,7 +481,7 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate)
/*
* One problem is that heap_beginscan counts a "sequential scan" start,
* when we actually aren't doing any such thing. Reverse out the added
* scan count. (Eventually we may want to count bitmap scans separately.)
* scan count. (Eventually we may want to count bitmap scans separately.)
*/
pgstat_discount_heap_scan(&scanstate->ss.ss_currentScanDesc->rs_pgstat_info);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.9 2005/05/06 17:24:54 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.10 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -54,17 +54,16 @@ MultiExecBitmapIndexScan(BitmapIndexScanState *node)
scandesc = node->biss_ScanDesc;
/*
* If we have runtime keys and they've not already been set up, do it
* now.
* If we have runtime keys and they've not already been set up, do it now.
*/
if (node->biss_RuntimeKeyInfo && !node->biss_RuntimeKeysReady)
ExecReScan((PlanState *) node, NULL);
/*
* Prepare the result bitmap. Normally we just create a new one to pass
* back; however, our parent node is allowed to store a pre-made one
* into node->biss_result, in which case we just OR our tuple IDs into
* the existing bitmap. (This saves needing explicit UNION steps.)
* back; however, our parent node is allowed to store a pre-made one into
* node->biss_result, in which case we just OR our tuple IDs into the
* existing bitmap. (This saves needing explicit UNION steps.)
*/
if (node->biss_result)
{
@ -82,7 +81,7 @@ MultiExecBitmapIndexScan(BitmapIndexScanState *node)
*/
for (;;)
{
bool more = index_getmulti(scandesc, tids, MAX_TIDS, &ntids);
bool more = index_getmulti(scandesc, tids, MAX_TIDS, &ntids);
if (ntids > 0)
{
@ -116,8 +115,7 @@ ExecBitmapIndexReScan(BitmapIndexScanState *node, ExprContext *exprCtxt)
ExprContext *econtext;
ExprState **runtimeKeyInfo;
econtext = node->biss_RuntimeContext; /* context for runtime
* keys */
econtext = node->biss_RuntimeContext; /* context for runtime keys */
runtimeKeyInfo = node->biss_RuntimeKeyInfo;
if (econtext)
@ -130,16 +128,16 @@ ExecBitmapIndexReScan(BitmapIndexScanState *node, ExprContext *exprCtxt)
econtext->ecxt_outertuple = exprCtxt->ecxt_outertuple;
/*
* Reset the runtime-key context so we don't leak memory as each
* outer tuple is scanned. Note this assumes that we will
* recalculate *all* runtime keys on each call.
* Reset the runtime-key context so we don't leak memory as each outer
* tuple is scanned. Note this assumes that we will recalculate *all*
* runtime keys on each call.
*/
ResetExprContext(econtext);
}
/*
* If we are doing runtime key calculations (ie, the index keys depend
* on data from an outer scan), compute the new key values
* If we are doing runtime key calculations (ie, the index keys depend on
* data from an outer scan), compute the new key values
*/
if (runtimeKeyInfo)
{
@ -213,8 +211,8 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate)
/*
* Miscellaneous initialization
*
* We do not need a standard exprcontext for this node, though we may
* decide below to create a runtime-key exprcontext
* We do not need a standard exprcontext for this node, though we may decide
* below to create a runtime-key exprcontext
*/
/*
@ -252,10 +250,10 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate)
indexstate->biss_NumScanKeys = numScanKeys;
/*
* If we have runtime keys, we need an ExprContext to evaluate them.
* We could just create a "standard" plan node exprcontext, but to
* keep the code looking similar to nodeIndexscan.c, it seems better
* to stick with the approach of using a separate ExprContext.
* If we have runtime keys, we need an ExprContext to evaluate them. We
* could just create a "standard" plan node exprcontext, but to keep the
* code looking similar to nodeIndexscan.c, it seems better to stick with
* the approach of using a separate ExprContext.
*/
if (have_runtime_keys)
{
@ -272,17 +270,17 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate)
/*
* We do not open or lock the base relation here. We assume that an
* ancestor BitmapHeapScan node is holding AccessShareLock on the
* heap relation throughout the execution of the plan tree.
* ancestor BitmapHeapScan node is holding AccessShareLock on the heap
* relation throughout the execution of the plan tree.
*/
indexstate->ss.ss_currentRelation = NULL;
indexstate->ss.ss_currentScanDesc = NULL;
/*
* open the index relation and initialize relation and scan
* descriptors. Note we acquire no locks here; the index machinery
* does its own locks and unlocks.
* open the index relation and initialize relation and scan descriptors.
* Note we acquire no locks here; the index machinery does its own locks
* and unlocks.
*/
indexstate->biss_RelationDesc = index_open(node->indexid);
indexstate->biss_ScanDesc =

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeBitmapOr.c,v 1.2 2005/04/20 15:48:36 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeBitmapOr.c,v 1.3 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -133,13 +133,13 @@ MultiExecBitmapOr(BitmapOrState *node)
TIDBitmap *subresult;
/*
* We can special-case BitmapIndexScan children to avoid an
* explicit tbm_union step for each child: just pass down the
* current result bitmap and let the child OR directly into it.
* We can special-case BitmapIndexScan children to avoid an explicit
* tbm_union step for each child: just pass down the current result
* bitmap and let the child OR directly into it.
*/
if (IsA(subnode, BitmapIndexScanState))
{
if (result == NULL) /* first subplan */
if (result == NULL) /* first subplan */
{
/* XXX should we use less than work_mem for this? */
result = tbm_create(work_mem * 1024L);
@ -161,7 +161,7 @@ MultiExecBitmapOr(BitmapOrState *node)
elog(ERROR, "unrecognized result from subplan");
if (result == NULL)
result = subresult; /* first subplan */
result = subresult; /* first subplan */
else
{
tbm_union(result, subresult);
@ -176,7 +176,7 @@ MultiExecBitmapOr(BitmapOrState *node)
/* must provide our own instrumentation support */
if (node->ps.instrument)
InstrStopNodeMulti(node->ps.instrument, 0 /* XXX */);
InstrStopNodeMulti(node->ps.instrument, 0 /* XXX */ );
return (Node *) result;
}

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeFunctionscan.c,v 1.34 2005/05/22 22:30:19 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeFunctionscan.c,v 1.35 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -60,9 +60,8 @@ FunctionNext(FunctionScanState *node)
tuplestorestate = node->tuplestorestate;
/*
* If first time through, read all tuples from function and put them
* in a tuplestore. Subsequent calls just fetch tuples from
* tuplestore.
* If first time through, read all tuples from function and put them in a
* tuplestore. Subsequent calls just fetch tuples from tuplestore.
*/
if (tuplestorestate == NULL)
{
@ -77,10 +76,10 @@ FunctionNext(FunctionScanState *node)
/*
* If function provided a tupdesc, cross-check it. We only really
* need to do this for functions returning RECORD, but might as
* well do it always.
* need to do this for functions returning RECORD, but might as well
* do it always.
*/
if (funcTupdesc)
if (funcTupdesc)
tupledesc_match(node->tupdesc, funcTupdesc);
}
@ -174,8 +173,8 @@ ExecInitFunctionScan(FunctionScan *node, EState *estate)
Assert(rte->rtekind == RTE_FUNCTION);
/*
* Now determine if the function returns a simple or composite type,
* and build an appropriate tupdesc.
* Now determine if the function returns a simple or composite type, and
* build an appropriate tupdesc.
*/
functypclass = get_expr_result_type(rte->funcexpr,
&funcrettype,
@ -213,8 +212,8 @@ ExecInitFunctionScan(FunctionScan *node, EState *estate)
/*
* For RECORD results, make sure a typmod has been assigned. (The
* function should do this for itself, but let's cover things in case
* it doesn't.)
* function should do this for itself, but let's cover things in case it
* doesn't.)
*/
BlessTupleDesc(tupdesc);
@ -329,10 +328,10 @@ ExecFunctionReScan(FunctionScanState *node, ExprContext *exprCtxt)
return;
/*
* Here we have a choice whether to drop the tuplestore (and recompute
* the function outputs) or just rescan it. This should depend on
* whether the function expression contains parameters and/or is
* marked volatile. FIXME soon.
* Here we have a choice whether to drop the tuplestore (and recompute the
* function outputs) or just rescan it. This should depend on whether the
* function expression contains parameters and/or is marked volatile.
* FIXME soon.
*/
if (node->ss.ps.chgParam != NULL)
{
@ -376,7 +375,7 @@ tupledesc_match(TupleDesc dst_tupdesc, TupleDesc src_tupdesc)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function return row and query-specified return row do not match"),
errdetail("Returned type %s at ordinal position %d, but query expects %s.",
errdetail("Returned type %s at ordinal position %d, but query expects %s.",
format_type_be(sattr->atttypid),
i + 1,
format_type_be(dattr->atttypid))));

View File

@ -15,7 +15,7 @@
* locate group boundaries.
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeGroup.c,v 1.61 2005/03/16 21:38:07 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeGroup.c,v 1.62 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -61,8 +61,8 @@ ExecGroup(GroupState *node)
*/
/*
* If first time through, acquire first input tuple and determine
* whether to return it or not.
* If first time through, acquire first input tuple and determine whether
* to return it or not.
*/
if (TupIsNull(firsttupleslot))
{
@ -76,15 +76,15 @@ ExecGroup(GroupState *node)
/* Copy tuple, set up as input for qual test and projection */
ExecCopySlot(firsttupleslot, outerslot);
econtext->ecxt_scantuple = firsttupleslot;
/*
* Check the qual (HAVING clause); if the group does not match,
* ignore it and fall into scan loop.
* Check the qual (HAVING clause); if the group does not match, ignore
* it and fall into scan loop.
*/
if (ExecQual(node->ss.ps.qual, econtext, false))
{
/*
* Form and return a projection tuple using the first input
* tuple.
* Form and return a projection tuple using the first input tuple.
*/
return ExecProject(node->ss.ps.ps_ProjInfo, NULL);
}
@ -92,8 +92,8 @@ ExecGroup(GroupState *node)
/*
* This loop iterates once per input tuple group. At the head of the
* loop, we have finished processing the first tuple of the group and
* now need to scan over all the other group members.
* loop, we have finished processing the first tuple of the group and now
* need to scan over all the other group members.
*/
for (;;)
{
@ -120,22 +120,23 @@ ExecGroup(GroupState *node)
econtext->ecxt_per_tuple_memory))
break;
}
/*
* We have the first tuple of the next input group. See if we
* want to return it.
* We have the first tuple of the next input group. See if we want to
* return it.
*/
/* Copy tuple, set up as input for qual test and projection */
ExecCopySlot(firsttupleslot, outerslot);
econtext->ecxt_scantuple = firsttupleslot;
/*
* Check the qual (HAVING clause); if the group does not match,
* ignore it and loop back to scan the rest of the group.
* Check the qual (HAVING clause); if the group does not match, ignore
* it and loop back to scan the rest of the group.
*/
if (ExecQual(node->ss.ps.qual, econtext, false))
{
/*
* Form and return a projection tuple using the first input
* tuple.
* Form and return a projection tuple using the first input tuple.
*/
return ExecProject(node->ss.ps.ps_ProjInfo, NULL);
}

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.95 2005/09/25 19:37:34 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.96 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -100,11 +100,11 @@ MultiExecHash(HashState *node)
InstrStopNodeMulti(node->ps.instrument, hashtable->totalTuples);
/*
* We do not return the hash table directly because it's not a subtype
* of Node, and so would violate the MultiExecProcNode API. Instead,
* our parent Hashjoin node is expected to know how to fish it out
* of our node state. Ugly but not really worth cleaning up, since
* Hashjoin knows quite a bit more about Hash besides that.
* We do not return the hash table directly because it's not a subtype of
* Node, and so would violate the MultiExecProcNode API. Instead, our
* parent Hashjoin node is expected to know how to fish it out of our node
* state. Ugly but not really worth cleaning up, since Hashjoin knows
* quite a bit more about Hash besides that.
*/
return NULL;
}
@ -161,8 +161,8 @@ ExecInitHash(Hash *node, EState *estate)
outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate);
/*
* initialize tuple type. no need to initialize projection info
* because this node doesn't do projections
* initialize tuple type. no need to initialize projection info because
* this node doesn't do projections
*/
ExecAssignResultTypeFromOuterPlan(&hashstate->ps);
hashstate->ps.ps_ProjInfo = NULL;
@ -221,9 +221,9 @@ ExecHashTableCreate(Hash *node, List *hashOperators)
MemoryContext oldcxt;
/*
* Get information about the size of the relation to be hashed (it's
* the "outer" subtree of this node, but the inner relation of the
* hashjoin). Compute the appropriate size of the hash table.
* Get information about the size of the relation to be hashed (it's the
* "outer" subtree of this node, but the inner relation of the hashjoin).
* Compute the appropriate size of the hash table.
*/
outerNode = outerPlan(node);
@ -237,8 +237,8 @@ ExecHashTableCreate(Hash *node, List *hashOperators)
/*
* Initialize the hash table control block.
*
* The hashtable control block is just palloc'd from the executor's
* per-query memory context.
* The hashtable control block is just palloc'd from the executor's per-query
* memory context.
*/
hashtable = (HashJoinTable) palloc(sizeof(HashJoinTableData));
hashtable->nbuckets = nbuckets;
@ -273,8 +273,8 @@ ExecHashTableCreate(Hash *node, List *hashOperators)
}
/*
* Create temporary memory contexts in which to keep the hashtable
* working storage. See notes in executor/hashjoin.h.
* Create temporary memory contexts in which to keep the hashtable working
* storage. See notes in executor/hashjoin.h.
*/
hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
"HashTableContext",
@ -353,9 +353,9 @@ ExecChooseHashTableSize(double ntuples, int tupwidth,
ntuples = 1000.0;
/*
* Estimate tupsize based on footprint of tuple in hashtable... note
* this does not allow for any palloc overhead. The manipulations of
* spaceUsed don't count palloc overhead either.
* Estimate tupsize based on footprint of tuple in hashtable... note this
* does not allow for any palloc overhead. The manipulations of spaceUsed
* don't count palloc overhead either.
*/
tupsize = MAXALIGN(sizeof(HashJoinTupleData)) +
MAXALIGN(sizeof(HeapTupleHeaderData)) +
@ -375,16 +375,16 @@ ExecChooseHashTableSize(double ntuples, int tupwidth,
if (inner_rel_bytes > hash_table_bytes)
{
/* We'll need multiple batches */
long lbuckets;
double dbatch;
int minbatch;
long lbuckets;
double dbatch;
int minbatch;
lbuckets = (hash_table_bytes / tupsize) / NTUP_PER_BUCKET;
lbuckets = Min(lbuckets, INT_MAX);
nbuckets = (int) lbuckets;
dbatch = ceil(inner_rel_bytes / hash_table_bytes);
dbatch = Min(dbatch, INT_MAX/2);
dbatch = Min(dbatch, INT_MAX / 2);
minbatch = (int) dbatch;
nbatch = 2;
while (nbatch < minbatch)
@ -393,7 +393,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth,
else
{
/* We expect the hashtable to fit in memory */
double dbuckets;
double dbuckets;
dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
dbuckets = Min(dbuckets, INT_MAX);
@ -406,8 +406,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth,
* We want nbuckets to be prime so as to avoid having bucket and batch
* numbers depend on only some bits of the hash code. Choose the next
* larger prime from the list in hprimes[]. (This also enforces that
* nbuckets is not very small, by the simple expedient of not putting
* any very small entries in hprimes[].)
* nbuckets is not very small, by the simple expedient of not putting any
* very small entries in hprimes[].)
*/
for (i = 0; i < (int) lengthof(hprimes); i++)
{
@ -475,7 +475,7 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
return;
/* safety check to avoid overflow */
if (oldnbatch > INT_MAX/2)
if (oldnbatch > INT_MAX / 2)
return;
nbatch = oldnbatch * 2;
@ -514,8 +514,8 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
hashtable->nbatch = nbatch;
/*
* Scan through the existing hash table entries and dump out any
* that are no longer of the current batch.
* Scan through the existing hash table entries and dump out any that are
* no longer of the current batch.
*/
ninmemory = nfreed = 0;
@ -571,12 +571,12 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
#endif
/*
* If we dumped out either all or none of the tuples in the table,
* disable further expansion of nbatch. This situation implies that
* we have enough tuples of identical hashvalues to overflow spaceAllowed.
* Increasing nbatch will not fix it since there's no way to subdivide
* the group any more finely.
* We have to just gut it out and hope the server has enough RAM.
* If we dumped out either all or none of the tuples in the table, disable
* further expansion of nbatch. This situation implies that we have
* enough tuples of identical hashvalues to overflow spaceAllowed.
* Increasing nbatch will not fix it since there's no way to subdivide the
* group any more finely. We have to just gut it out and hope the server
* has enough RAM.
*/
if (nfreed == 0 || nfreed == ninmemory)
{
@ -663,8 +663,8 @@ ExecHashGetHashValue(HashJoinTable hashtable,
MemoryContext oldContext;
/*
* We reset the eval context each time to reclaim any memory leaked in
* the hashkey expressions.
* We reset the eval context each time to reclaim any memory leaked in the
* hashkey expressions.
*/
ResetExprContext(econtext);
@ -727,8 +727,8 @@ ExecHashGetBucketAndBatch(HashJoinTable hashtable,
int *bucketno,
int *batchno)
{
uint32 nbuckets = (uint32) hashtable->nbuckets;
uint32 nbatch = (uint32) hashtable->nbatch;
uint32 nbuckets = (uint32) hashtable->nbuckets;
uint32 nbatch = (uint32) hashtable->nbatch;
if (nbatch > 1)
{
@ -759,8 +759,8 @@ ExecScanHashBucket(HashJoinState *hjstate,
uint32 hashvalue = hjstate->hj_CurHashValue;
/*
* hj_CurTuple is NULL to start scanning a new bucket, or the address
* of the last tuple returned from the current bucket.
* hj_CurTuple is NULL to start scanning a new bucket, or the address of
* the last tuple returned from the current bucket.
*/
if (hashTuple == NULL)
hashTuple = hashtable->buckets[hjstate->hj_CurBucketNo];
@ -812,8 +812,8 @@ ExecHashTableReset(HashJoinTable hashtable)
int nbuckets = hashtable->nbuckets;
/*
* Release all the hash buckets and tuples acquired in the prior pass,
* and reinitialize the context for a new pass.
* Release all the hash buckets and tuples acquired in the prior pass, and
* reinitialize the context for a new pass.
*/
MemoryContextReset(hashtable->batchCxt);
oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.73 2005/09/25 19:37:34 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.74 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -24,8 +24,8 @@
static TupleTableSlot *ExecHashJoinOuterGetTuple(PlanState *outerNode,
HashJoinState *hjstate,
uint32 *hashvalue);
HashJoinState *hjstate,
uint32 *hashvalue);
static TupleTableSlot *ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
BufFile *file,
uint32 *hashvalue,
@ -77,9 +77,9 @@ ExecHashJoin(HashJoinState *node)
econtext = node->js.ps.ps_ExprContext;
/*
* Check to see if we're still projecting out tuples from a previous
* join tuple (because there is a function-returning-set in the
* projection expressions). If so, try to project another one.
* Check to see if we're still projecting out tuples from a previous join
* tuple (because there is a function-returning-set in the projection
* expressions). If so, try to project another one.
*/
if (node->js.ps.ps_TupFromTlist)
{
@ -93,17 +93,17 @@ ExecHashJoin(HashJoinState *node)
}
/*
* If we're doing an IN join, we want to return at most one row per
* outer tuple; so we can stop scanning the inner scan if we matched
* on the previous try.
* If we're doing an IN join, we want to return at most one row per outer
* tuple; so we can stop scanning the inner scan if we matched on the
* previous try.
*/
if (node->js.jointype == JOIN_IN && node->hj_MatchedOuter)
node->hj_NeedNewOuter = true;
/*
* Reset per-tuple memory context to free any expression evaluation
* storage allocated in the previous tuple cycle. Note this can't
* happen until we're done projecting out tuples from a join tuple.
* storage allocated in the previous tuple cycle. Note this can't happen
* until we're done projecting out tuples from a join tuple.
*/
ResetExprContext(econtext);
@ -114,17 +114,17 @@ ExecHashJoin(HashJoinState *node)
{
/*
* If the outer relation is completely empty, we can quit without
* building the hash table. However, for an inner join it is only
* a win to check this when the outer relation's startup cost is less
* than the projected cost of building the hash table. Otherwise
* it's best to build the hash table first and see if the inner
* relation is empty. (When it's an outer join, we should always
* make this check, since we aren't going to be able to skip the
* join on the strength of an empty inner relation anyway.)
* building the hash table. However, for an inner join it is only a
* win to check this when the outer relation's startup cost is less
* than the projected cost of building the hash table. Otherwise it's
* best to build the hash table first and see if the inner relation is
* empty. (When it's an outer join, we should always make this check,
* since we aren't going to be able to skip the join on the strength
* of an empty inner relation anyway.)
*
* The only way to make the check is to try to fetch a tuple from
* the outer plan node. If we succeed, we have to stash it away
* for later consumption by ExecHashJoinOuterGetTuple.
* The only way to make the check is to try to fetch a tuple from the
* outer plan node. If we succeed, we have to stash it away for later
* consumption by ExecHashJoinOuterGetTuple.
*/
if (outerNode->plan->startup_cost < hashNode->ps.plan->total_cost ||
node->js.jointype == JOIN_LEFT)
@ -150,8 +150,8 @@ ExecHashJoin(HashJoinState *node)
(void) MultiExecProcNode((PlanState *) hashNode);
/*
* If the inner relation is completely empty, and we're not doing
* an outer join, we can quit without scanning the outer relation.
* If the inner relation is completely empty, and we're not doing an
* outer join, we can quit without scanning the outer relation.
*/
if (hashtable->totalTuples == 0 && node->js.jointype != JOIN_LEFT)
{
@ -193,8 +193,8 @@ ExecHashJoin(HashJoinState *node)
node->hj_MatchedOuter = false;
/*
* now we have an outer tuple, find the corresponding bucket
* for this tuple from the hash table
* now we have an outer tuple, find the corresponding bucket for
* this tuple from the hash table
*/
node->hj_CurHashValue = hashvalue;
ExecHashGetBucketAndBatch(hashtable, hashvalue,
@ -202,21 +202,21 @@ ExecHashJoin(HashJoinState *node)
node->hj_CurTuple = NULL;
/*
* Now we've got an outer tuple and the corresponding hash
* bucket, but this tuple may not belong to the current batch.
* Now we've got an outer tuple and the corresponding hash bucket,
* but this tuple may not belong to the current batch.
*/
if (batchno != hashtable->curbatch)
{
/*
* Need to postpone this outer tuple to a later batch.
* Save it in the corresponding outer-batch file.
* Need to postpone this outer tuple to a later batch. Save it
* in the corresponding outer-batch file.
*/
Assert(batchno > hashtable->curbatch);
ExecHashJoinSaveTuple(ExecFetchSlotTuple(outerTupleSlot),
hashvalue,
&hashtable->outerBatchFile[batchno]);
node->hj_NeedNewOuter = true;
continue; /* loop around for a new outer tuple */
continue; /* loop around for a new outer tuple */
}
}
@ -243,11 +243,11 @@ ExecHashJoin(HashJoinState *node)
/*
* if we pass the qual, then save state for next call and have
* ExecProject form the projection, store it in the tuple
* table, and return the slot.
* ExecProject form the projection, store it in the tuple table,
* and return the slot.
*
* Only the joinquals determine MatchedOuter status, but all
* quals must pass to actually return the tuple.
* Only the joinquals determine MatchedOuter status, but all quals
* must pass to actually return the tuple.
*/
if (joinqual == NIL || ExecQual(joinqual, econtext, false))
{
@ -268,8 +268,7 @@ ExecHashJoin(HashJoinState *node)
}
/*
* If we didn't return a tuple, may need to set
* NeedNewOuter
* If we didn't return a tuple, may need to set NeedNewOuter
*/
if (node->js.jointype == JOIN_IN)
{
@ -281,8 +280,8 @@ ExecHashJoin(HashJoinState *node)
/*
* Now the current outer tuple has run out of matches, so check
* whether to emit a dummy outer-join tuple. If not, loop around
* to get a new outer tuple.
* whether to emit a dummy outer-join tuple. If not, loop around to
* get a new outer tuple.
*/
node->hj_NeedNewOuter = true;
@ -290,19 +289,17 @@ ExecHashJoin(HashJoinState *node)
node->js.jointype == JOIN_LEFT)
{
/*
* We are doing an outer join and there were no join matches
* for this outer tuple. Generate a fake join tuple with
* nulls for the inner tuple, and return it if it passes the
* non-join quals.
* We are doing an outer join and there were no join matches for
* this outer tuple. Generate a fake join tuple with nulls for
* the inner tuple, and return it if it passes the non-join quals.
*/
econtext->ecxt_innertuple = node->hj_NullInnerTupleSlot;
if (ExecQual(otherqual, econtext, false))
{
/*
* qualification was satisfied so we project and return
* the slot containing the result tuple using
* ExecProject().
* qualification was satisfied so we project and return the
* slot containing the result tuple using ExecProject().
*/
TupleTableSlot *result;
@ -392,7 +389,7 @@ ExecInitHashJoin(HashJoin *node, EState *estate)
case JOIN_LEFT:
hjstate->hj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
ExecGetResultType(innerPlanState(hjstate)));
ExecGetResultType(innerPlanState(hjstate)));
break;
default:
elog(ERROR, "unrecognized join type: %d",
@ -400,11 +397,11 @@ ExecInitHashJoin(HashJoin *node, EState *estate)
}
/*
* now for some voodoo. our temporary tuple slot is actually the
* result tuple slot of the Hash node (which is our inner plan). we
* do this because Hash nodes don't return tuples via ExecProcNode()
* -- instead the hash join node uses ExecScanHashBucket() to get at
* the contents of the hash table. -cim 6/9/91
* now for some voodoo. our temporary tuple slot is actually the result
* tuple slot of the Hash node (which is our inner plan). we do this
* because Hash nodes don't return tuples via ExecProcNode() -- instead
* the hash join node uses ExecScanHashBucket() to get at the contents of
* the hash table. -cim 6/9/91
*/
{
HashState *hashstate = (HashState *) innerPlanState(hjstate);
@ -434,10 +431,10 @@ ExecInitHashJoin(HashJoin *node, EState *estate)
hjstate->hj_CurTuple = NULL;
/*
* Deconstruct the hash clauses into outer and inner argument values,
* so that we can evaluate those subexpressions separately. Also make
* a list of the hash operator OIDs, in preparation for looking up the
* hash functions to use.
* Deconstruct the hash clauses into outer and inner argument values, so
* that we can evaluate those subexpressions separately. Also make a list
* of the hash operator OIDs, in preparation for looking up the hash
* functions to use.
*/
lclauses = NIL;
rclauses = NIL;
@ -536,6 +533,7 @@ ExecHashJoinOuterGetTuple(PlanState *outerNode,
if (curbatch == 0)
{ /* if it is the first pass */
/*
* Check to see if first outer tuple was already fetched by
* ExecHashJoin() and not used yet.
@ -560,16 +558,16 @@ ExecHashJoinOuterGetTuple(PlanState *outerNode,
}
/*
* We have just reached the end of the first pass. Try to switch
* to a saved batch.
* We have just reached the end of the first pass. Try to switch to a
* saved batch.
*/
curbatch = ExecHashJoinNewBatch(hjstate);
}
/*
* Try to read from a temp file. Loop allows us to advance to new
* batches as needed. NOTE: nbatch could increase inside
* ExecHashJoinNewBatch, so don't try to optimize this loop.
* Try to read from a temp file. Loop allows us to advance to new batches
* as needed. NOTE: nbatch could increase inside ExecHashJoinNewBatch, so
* don't try to optimize this loop.
*/
while (curbatch < hashtable->nbatch)
{
@ -623,16 +621,16 @@ start_over:
* sides. We can sometimes skip over batches that are empty on only one
* side, but there are exceptions:
*
* 1. In a LEFT JOIN, we have to process outer batches even if the
* inner batch is empty.
* 1. In a LEFT JOIN, we have to process outer batches even if the inner
* batch is empty.
*
* 2. If we have increased nbatch since the initial estimate, we have
* to scan inner batches since they might contain tuples that need to
* be reassigned to later inner batches.
* 2. If we have increased nbatch since the initial estimate, we have to scan
* inner batches since they might contain tuples that need to be
* reassigned to later inner batches.
*
* 3. Similarly, if we have increased nbatch since starting the outer
* scan, we have to rescan outer batches in case they contain tuples
* that need to be reassigned.
* 3. Similarly, if we have increased nbatch since starting the outer scan,
* we have to rescan outer batches in case they contain tuples that need
* to be reassigned.
*/
curbatch++;
while (curbatch < nbatch &&
@ -676,7 +674,7 @@ start_over:
if (BufFileSeek(innerFile, 0, 0L, SEEK_SET))
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not rewind hash-join temporary file: %m")));
errmsg("could not rewind hash-join temporary file: %m")));
while ((slot = ExecHashJoinGetSavedTuple(hjstate,
innerFile,
@ -684,8 +682,8 @@ start_over:
hjstate->hj_HashTupleSlot)))
{
/*
* NOTE: some tuples may be sent to future batches. Also,
* it is possible for hashtable->nbatch to be increased here!
* NOTE: some tuples may be sent to future batches. Also, it is
* possible for hashtable->nbatch to be increased here!
*/
ExecHashTableInsert(hashtable,
ExecFetchSlotTuple(slot),
@ -733,7 +731,7 @@ void
ExecHashJoinSaveTuple(HeapTuple heapTuple, uint32 hashvalue,
BufFile **fileptr)
{
BufFile *file = *fileptr;
BufFile *file = *fileptr;
size_t written;
if (file == NULL)
@ -764,7 +762,7 @@ ExecHashJoinSaveTuple(HeapTuple heapTuple, uint32 hashvalue,
/*
* ExecHashJoinGetSavedTuple
* read the next tuple from a batch file. Return NULL if no more.
* read the next tuple from a batch file. Return NULL if no more.
*
* On success, *hashvalue is set to the tuple's hash value, and the tuple
* itself is stored in the given slot.
@ -809,18 +807,18 @@ void
ExecReScanHashJoin(HashJoinState *node, ExprContext *exprCtxt)
{
/*
* If we haven't yet built the hash table then we can just return;
* nothing done yet, so nothing to undo.
* If we haven't yet built the hash table then we can just return; nothing
* done yet, so nothing to undo.
*/
if (node->hj_HashTable == NULL)
return;
/*
* In a multi-batch join, we currently have to do rescans the hard
* way, primarily because batch temp files may have already been
* released. But if it's a single-batch join, and there is no
* parameter change for the inner subnode, then we can just re-use the
* existing hash table without rebuilding it.
* In a multi-batch join, we currently have to do rescans the hard way,
* primarily because batch temp files may have already been released. But
* if it's a single-batch join, and there is no parameter change for the
* inner subnode, then we can just re-use the existing hash table without
* rebuilding it.
*/
if (node->hj_HashTable->nbatch == 1 &&
((PlanState *) node)->righttree->chgParam == NULL)
@ -835,8 +833,8 @@ ExecReScanHashJoin(HashJoinState *node, ExprContext *exprCtxt)
node->hj_FirstOuterTupleSlot = NULL;
/*
* if chgParam of subnode is not null then plan will be re-scanned
* by first ExecProcNode.
* if chgParam of subnode is not null then plan will be re-scanned by
* first ExecProcNode.
*/
if (((PlanState *) node)->righttree->chgParam == NULL)
ExecReScan(((PlanState *) node)->righttree, exprCtxt);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.103 2005/05/06 17:24:54 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.104 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -75,11 +75,11 @@ IndexNext(IndexScanState *node)
scanrelid = ((IndexScan *) node->ss.ps.plan)->scan.scanrelid;
/*
* Clear any reference to the previously returned tuple. The idea
* here is to not have the tuple slot be the last holder of a pin on
* that tuple's buffer; if it is, we'll need a separate visit to the
* bufmgr to release the buffer. By clearing here, we get to have the
* release done by ReleaseAndReadBuffer inside index_getnext.
* Clear any reference to the previously returned tuple. The idea here is
* to not have the tuple slot be the last holder of a pin on that tuple's
* buffer; if it is, we'll need a separate visit to the bufmgr to release
* the buffer. By clearing here, we get to have the release done by
* ReleaseAndReadBuffer inside index_getnext.
*/
ExecClearTuple(slot);
@ -104,7 +104,7 @@ IndexNext(IndexScanState *node)
ResetExprContext(econtext);
if (!ExecQual(node->indexqualorig, econtext, false))
ExecClearTuple(slot); /* would not be returned by scan */
ExecClearTuple(slot); /* would not be returned by scan */
/* Flag for the next call that no more tuples */
estate->es_evTupleNull[scanrelid - 1] = true;
@ -118,22 +118,21 @@ IndexNext(IndexScanState *node)
if ((tuple = index_getnext(scandesc, direction)) != NULL)
{
/*
* Store the scanned tuple in the scan tuple slot of the scan
* state. Note: we pass 'false' because tuples returned by
* amgetnext are pointers onto disk pages and must not be
* pfree()'d.
* Store the scanned tuple in the scan tuple slot of the scan state.
* Note: we pass 'false' because tuples returned by amgetnext are
* pointers onto disk pages and must not be pfree()'d.
*/
ExecStoreTuple(tuple, /* tuple to store */
slot, /* slot to store in */
scandesc->xs_cbuf, /* buffer containing tuple */
false); /* don't pfree */
ExecStoreTuple(tuple, /* tuple to store */
slot, /* slot to store in */
scandesc->xs_cbuf, /* buffer containing tuple */
false); /* don't pfree */
return slot;
}
/*
* if we get here it means the index scan failed so we are at the end
* of the scan..
* if we get here it means the index scan failed so we are at the end of
* the scan..
*/
return ExecClearTuple(slot);
}
@ -146,8 +145,7 @@ TupleTableSlot *
ExecIndexScan(IndexScanState *node)
{
/*
* If we have runtime keys and they've not already been set up, do it
* now.
* If we have runtime keys and they've not already been set up, do it now.
*/
if (node->iss_RuntimeKeyInfo && !node->iss_RuntimeKeysReady)
ExecReScan((PlanState *) node, NULL);
@ -179,8 +177,7 @@ ExecIndexReScan(IndexScanState *node, ExprContext *exprCtxt)
Index scanrelid;
estate = node->ss.ps.state;
econtext = node->iss_RuntimeContext; /* context for runtime
* keys */
econtext = node->iss_RuntimeContext; /* context for runtime keys */
scanKeys = node->iss_ScanKeys;
runtimeKeyInfo = node->iss_RuntimeKeyInfo;
numScanKeys = node->iss_NumScanKeys;
@ -203,16 +200,16 @@ ExecIndexReScan(IndexScanState *node, ExprContext *exprCtxt)
}
/*
* Reset the runtime-key context so we don't leak memory as each
* outer tuple is scanned. Note this assumes that we will
* recalculate *all* runtime keys on each call.
* Reset the runtime-key context so we don't leak memory as each outer
* tuple is scanned. Note this assumes that we will recalculate *all*
* runtime keys on each call.
*/
ResetExprContext(econtext);
}
/*
* If we are doing runtime key calculations (ie, the index keys depend
* on data from an outer scan), compute the new key values
* If we are doing runtime key calculations (ie, the index keys depend on
* data from an outer scan), compute the new key values
*/
if (runtimeKeyInfo)
{
@ -251,16 +248,16 @@ ExecIndexEvalRuntimeKeys(ExprContext *econtext,
for (j = 0; j < n_keys; j++)
{
/*
* If we have a run-time key, then extract the run-time
* expression and evaluate it with respect to the current
* outer tuple. We then stick the result into the scan key.
* If we have a run-time key, then extract the run-time expression and
* evaluate it with respect to the current outer tuple. We then stick
* the result into the scan key.
*
* Note: the result of the eval could be a pass-by-ref value
* that's stored in the outer scan's tuple, not in
* econtext->ecxt_per_tuple_memory. We assume that the
* outer tuple will stay put throughout our scan. If this
* is wrong, we could copy the result into our context
* explicitly, but I think that's not necessary...
* Note: the result of the eval could be a pass-by-ref value that's
* stored in the outer scan's tuple, not in
* econtext->ecxt_per_tuple_memory. We assume that the outer tuple
* will stay put throughout our scan. If this is wrong, we could copy
* the result into our context explicitly, but I think that's not
* necessary...
*/
if (run_keys[j] != NULL)
{
@ -323,9 +320,8 @@ ExecEndIndexScan(IndexScanState *node)
* close the heap relation.
*
* Currently, we do not release the AccessShareLock acquired by
* ExecInitIndexScan. This lock should be held till end of
* transaction. (There is a faction that considers this too much
* locking, however.)
* ExecInitIndexScan. This lock should be held till end of transaction.
* (There is a faction that considers this too much locking, however.)
*/
heap_close(relation, NoLock);
}
@ -392,11 +388,10 @@ ExecInitIndexScan(IndexScan *node, EState *estate)
* initialize child expressions
*
* Note: we don't initialize all of the indexqual expression, only the
* sub-parts corresponding to runtime keys (see below). The
* indexqualorig expression is always initialized even though it will
* only be used in some uncommon cases --- would be nice to improve
* that. (Problem is that any SubPlans present in the expression must
* be found now...)
* sub-parts corresponding to runtime keys (see below). The indexqualorig
* expression is always initialized even though it will only be used in
* some uncommon cases --- would be nice to improve that. (Problem is
* that any SubPlans present in the expression must be found now...)
*/
indexstate->ss.ps.targetlist = (List *)
ExecInitExpr((Expr *) node->scan.plan.targetlist,
@ -440,10 +435,10 @@ ExecInitIndexScan(IndexScan *node, EState *estate)
indexstate->iss_NumScanKeys = numScanKeys;
/*
* If we have runtime keys, we need an ExprContext to evaluate them.
* The node's standard context won't do because we want to reset that
* context for every tuple. So, build another context just like the
* other one... -tgl 7/11/00
* If we have runtime keys, we need an ExprContext to evaluate them. The
* node's standard context won't do because we want to reset that context
* for every tuple. So, build another context just like the other one...
* -tgl 7/11/00
*/
if (have_runtime_keys)
{
@ -476,10 +471,10 @@ ExecInitIndexScan(IndexScan *node, EState *estate)
ExecAssignScanType(&indexstate->ss, RelationGetDescr(currentRelation), false);
/*
* open the index relation and initialize relation and scan
* descriptors. Note we acquire no locks here; the index machinery
* does its own locks and unlocks. (We rely on having AccessShareLock
* on the parent table to ensure the index won't go away!)
* open the index relation and initialize relation and scan descriptors.
* Note we acquire no locks here; the index machinery does its own locks
* and unlocks. (We rely on having AccessShareLock on the parent table to
* ensure the index won't go away!)
*/
indexstate->iss_RelationDesc = index_open(node->indexid);
indexstate->iss_ScanDesc = index_beginscan(currentRelation,
@ -543,8 +538,8 @@ ExecIndexBuildScanKeys(PlanState *planstate, List *quals,
(ExprState **) palloc(n_keys * sizeof(ExprState *));
/*
* for each opclause in the given qual, convert each qual's
* opclause into a single scan key
* for each opclause in the given qual, convert each qual's opclause into
* a single scan key
*/
qual_cell = list_head(quals);
strategy_cell = list_head(strategies);
@ -552,15 +547,15 @@ ExecIndexBuildScanKeys(PlanState *planstate, List *quals,
for (j = 0; j < n_keys; j++)
{
OpExpr *clause; /* one clause of index qual */
Expr *leftop; /* expr on lhs of operator */
Expr *rightop; /* expr on rhs ... */
OpExpr *clause; /* one clause of index qual */
Expr *leftop; /* expr on lhs of operator */
Expr *rightop; /* expr on rhs ... */
int flags = 0;
AttrNumber varattno; /* att number used in scan */
AttrNumber varattno; /* att number used in scan */
StrategyNumber strategy; /* op's strategy number */
Oid subtype; /* op's strategy subtype */
RegProcedure opfuncid; /* operator proc id used in scan */
Datum scanvalue; /* value used in scan (if const) */
Oid subtype; /* op's strategy subtype */
RegProcedure opfuncid; /* operator proc id used in scan */
Datum scanvalue; /* value used in scan (if const) */
/*
* extract clause information from the qualification
@ -578,18 +573,17 @@ ExecIndexBuildScanKeys(PlanState *planstate, List *quals,
opfuncid = clause->opfuncid;
/*
* Here we figure out the contents of the index qual. The
* usual case is (var op const) which means we form a scan key
* for the attribute listed in the var node and use the value
* of the const as comparison data.
* Here we figure out the contents of the index qual. The usual case
* is (var op const) which means we form a scan key for the attribute
* listed in the var node and use the value of the const as comparison
* data.
*
* If we don't have a const node, it means our scan key is a
* function of information obtained during the execution of
* the plan, in which case we need to recalculate the index
* scan key at run time. Hence, we set have_runtime_keys to
* true and place the appropriate subexpression in run_keys.
* The corresponding scan key values are recomputed at run
* time.
* If we don't have a const node, it means our scan key is a function of
* information obtained during the execution of the plan, in which
* case we need to recalculate the index scan key at run time. Hence,
* we set have_runtime_keys to true and place the appropriate
* subexpression in run_keys. The corresponding scan key values are
* recomputed at run time.
*/
run_keys[j] = NULL;
@ -622,8 +616,8 @@ ExecIndexBuildScanKeys(PlanState *planstate, List *quals,
if (IsA(rightop, Const))
{
/*
* if the rightop is a const node then it means it
* identifies the value to place in our scan key.
* if the rightop is a const node then it means it identifies the
* value to place in our scan key.
*/
scanvalue = ((Const *) rightop)->constvalue;
if (((Const *) rightop)->constisnull)
@ -632,9 +626,8 @@ ExecIndexBuildScanKeys(PlanState *planstate, List *quals,
else
{
/*
* otherwise, the rightop contains an expression evaluable
* at runtime to figure out the value to place in our scan
* key.
* otherwise, the rightop contains an expression evaluable at
* runtime to figure out the value to place in our scan key.
*/
have_runtime_keys = true;
run_keys[j] = ExecInitExpr(rightop, planstate);
@ -646,11 +639,11 @@ ExecIndexBuildScanKeys(PlanState *planstate, List *quals,
*/
ScanKeyEntryInitialize(&scan_keys[j],
flags,
varattno, /* attribute number to scan */
strategy, /* op's strategy */
subtype, /* strategy subtype */
opfuncid, /* reg proc to use */
scanvalue); /* constant */
varattno, /* attribute number to scan */
strategy, /* op's strategy */
subtype, /* strategy subtype */
opfuncid, /* reg proc to use */
scanvalue); /* constant */
}
/* If no runtime keys, get rid of speculatively-allocated array */

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeLimit.c,v 1.21 2005/03/16 21:38:07 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeLimit.c,v 1.22 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -61,10 +61,9 @@ ExecLimit(LimitState *node)
return NULL;
/*
* First call for this scan, so compute limit/offset. (We
* can't do this any earlier, because parameters from upper
* nodes may not be set until now.) This also sets position =
* 0.
* First call for this scan, so compute limit/offset. (We can't do
* this any earlier, because parameters from upper nodes may not
* be set until now.) This also sets position = 0.
*/
recompute_limits(node);
@ -86,8 +85,8 @@ ExecLimit(LimitState *node)
if (TupIsNull(slot))
{
/*
* The subplan returns too few tuples for us to
* produce any output at all.
* The subplan returns too few tuples for us to produce
* any output at all.
*/
node->lstate = LIMIT_EMPTY;
return NULL;
@ -115,11 +114,10 @@ ExecLimit(LimitState *node)
if (ScanDirectionIsForward(direction))
{
/*
* Forwards scan, so check for stepping off end of window.
* If we are at the end of the window, return NULL without
* advancing the subplan or the position variable; but
* change the state machine state to record having done
* so.
* Forwards scan, so check for stepping off end of window. If
* we are at the end of the window, return NULL without
* advancing the subplan or the position variable; but change
* the state machine state to record having done so.
*/
if (!node->noCount &&
node->position >= node->offset + node->count)
@ -143,9 +141,8 @@ ExecLimit(LimitState *node)
else
{
/*
* Backwards scan, so check for stepping off start of
* window. As above, change only state-machine status if
* so.
* Backwards scan, so check for stepping off start of window.
* As above, change only state-machine status if so.
*/
if (node->position <= node->offset + 1)
{
@ -169,9 +166,8 @@ ExecLimit(LimitState *node)
return NULL;
/*
* Backing up from subplan EOF, so re-fetch previous tuple;
* there should be one! Note previous tuple must be in
* window.
* Backing up from subplan EOF, so re-fetch previous tuple; there
* should be one! Note previous tuple must be in window.
*/
slot = ExecProcNode(outerPlan);
if (TupIsNull(slot))
@ -328,8 +324,8 @@ ExecInitLimit(Limit *node, EState *estate)
outerPlanState(limitstate) = ExecInitNode(outerPlan, estate);
/*
* limit nodes do no projections, so initialize projection info for
* this node appropriately
* limit nodes do no projections, so initialize projection info for this
* node appropriately
*/
ExecAssignResultTypeFromOuterPlan(&limitstate->ps);
limitstate->ps.ps_ProjInfo = NULL;

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeMaterial.c,v 1.49 2005/03/16 21:38:07 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeMaterial.c,v 1.50 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -68,8 +68,8 @@ ExecMaterial(MaterialState *node)
}
/*
* If we are not at the end of the tuplestore, or are going backwards,
* try to fetch a tuple from tuplestore.
* If we are not at the end of the tuplestore, or are going backwards, try
* to fetch a tuple from tuplestore.
*/
eof_tuplestore = tuplestore_ateof(tuplestorestate);
@ -79,9 +79,9 @@ ExecMaterial(MaterialState *node)
{
/*
* When reversing direction at tuplestore EOF, the first
* getheaptuple call will fetch the last-added tuple; but we
* want to return the one before that, if possible. So do an
* extra fetch.
* getheaptuple call will fetch the last-added tuple; but we want
* to return the one before that, if possible. So do an extra
* fetch.
*/
heapTuple = tuplestore_getheaptuple(tuplestorestate,
forward,
@ -106,10 +106,10 @@ ExecMaterial(MaterialState *node)
/*
* If necessary, try to fetch another row from the subplan.
*
* Note: the eof_underlying state variable exists to short-circuit
* further subplan calls. It's not optional, unfortunately, because
* some plan node types are not robust about being called again when
* they've already returned NULL.
* Note: the eof_underlying state variable exists to short-circuit further
* subplan calls. It's not optional, unfortunately, because some plan
* node types are not robust about being called again when they've already
* returned NULL.
*/
if (eof_tuplestore && !node->eof_underlying)
{
@ -117,8 +117,8 @@ ExecMaterial(MaterialState *node)
TupleTableSlot *outerslot;
/*
* We can only get here with forward==true, so no need to worry
* about which direction the subplan will go.
* We can only get here with forward==true, so no need to worry about
* which direction the subplan will go.
*/
outerNode = outerPlanState(node);
outerslot = ExecProcNode(outerNode);
@ -132,8 +132,8 @@ ExecMaterial(MaterialState *node)
/*
* Append returned tuple to tuplestore, too. NOTE: because the
* tuplestore is certainly in EOF state, its read position will
* move forward over the added tuple. This is what we want.
* tuplestore is certainly in EOF state, its read position will move
* forward over the added tuple. This is what we want.
*/
tuplestore_puttuple(tuplestorestate, (void *) heapTuple);
}
@ -192,8 +192,8 @@ ExecInitMaterial(Material *node, EState *estate)
outerPlanState(matstate) = ExecInitNode(outerPlan, estate);
/*
* initialize tuple type. no need to initialize projection info
* because this node doesn't do projections.
* initialize tuple type. no need to initialize projection info because
* this node doesn't do projections.
*/
ExecAssignResultTypeFromOuterPlan(&matstate->ss.ps);
ExecAssignScanTypeFromOuterPlan(&matstate->ss);
@ -284,9 +284,9 @@ void
ExecMaterialReScan(MaterialState *node, ExprContext *exprCtxt)
{
/*
	 * If we haven't materialized yet, just return. If outerplan's chgParam
* is not NULL then it will be re-scanned by ExecProcNode, else - no
* reason to re-scan it at all.
	 * If we haven't materialized yet, just return. If outerplan's chgParam is
* not NULL then it will be re-scanned by ExecProcNode, else - no reason
* to re-scan it at all.
*/
if (!node->tuplestorestate)
return;
@ -294,11 +294,11 @@ ExecMaterialReScan(MaterialState *node, ExprContext *exprCtxt)
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
/*
* If subnode is to be rescanned then we forget previous stored
* results; we have to re-read the subplan and re-store.
* If subnode is to be rescanned then we forget previous stored results;
* we have to re-read the subplan and re-store.
*
* Otherwise we can just rewind and rescan the stored output. The state
* of the subnode does not change.
* Otherwise we can just rewind and rescan the stored output. The state of
* the subnode does not change.
*/
if (((PlanState *) node)->lefttree->chgParam != NULL)
{

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.74 2005/05/15 21:19:55 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.75 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -40,7 +40,7 @@
* matching tuple and so on.
*
* Therefore, when initializing the merge-join node, we look up the
* associated sort operators. We assume the planner has seen to it
* associated sort operators. We assume the planner has seen to it
* that the inputs are correctly sorted by these operators. Rather
* than directly executing the merge join clauses, we evaluate the
* left and right key expressions separately and then compare the
@ -124,30 +124,33 @@ typedef enum
typedef struct MergeJoinClauseData
{
/* Executable expression trees */
ExprState *lexpr; /* left-hand (outer) input expression */
ExprState *rexpr; /* right-hand (inner) input expression */
ExprState *lexpr; /* left-hand (outer) input expression */
ExprState *rexpr; /* right-hand (inner) input expression */
/*
* If we have a current left or right input tuple, the values of the
* expressions are loaded into these fields:
*/
Datum ldatum; /* current left-hand value */
Datum rdatum; /* current right-hand value */
bool lisnull; /* and their isnull flags */
bool risnull;
Datum ldatum; /* current left-hand value */
Datum rdatum; /* current right-hand value */
bool lisnull; /* and their isnull flags */
bool risnull;
/*
* Remember whether mergejoin operator is strict (usually it will be).
* NOTE: if it's not strict, we still assume it cannot return true for
* one null and one non-null input.
* NOTE: if it's not strict, we still assume it cannot return true for one
* null and one non-null input.
*/
bool mergestrict;
bool mergestrict;
/*
* The comparison strategy in use, and the lookup info to let us call
* the needed comparison routines. eqfinfo is the "=" operator itself.
* The comparison strategy in use, and the lookup info to let us call the
* needed comparison routines. eqfinfo is the "=" operator itself.
* cmpfinfo is either the btree comparator or the "<" operator.
*/
MergeFunctionKind cmpstrategy;
FmgrInfo eqfinfo;
FmgrInfo cmpfinfo;
FmgrInfo eqfinfo;
FmgrInfo cmpfinfo;
} MergeJoinClauseData;
@ -167,8 +170,8 @@ typedef struct MergeJoinClauseData
*
* The best, most efficient way to compare two expressions is to use a btree
* comparison support routine, since that requires only one function call
* per comparison. Hence we try to find a btree opclass that matches the
* mergejoinable operator. If we cannot find one, we'll have to call both
* per comparison. Hence we try to find a btree opclass that matches the
* mergejoinable operator. If we cannot find one, we'll have to call both
* the "=" and (often) the "<" operator for each comparison.
*/
static MergeJoinClause
@ -204,8 +207,8 @@ MJExamineQuals(List *qualList, PlanState *parent)
clause->rexpr = ExecInitExpr((Expr *) lsecond(qual->args), parent);
/*
* Check permission to call the mergejoinable operator.
* For predictability, we check this even if we end up not using it.
* Check permission to call the mergejoinable operator. For
* predictability, we check this even if we end up not using it.
*/
aclresult = pg_proc_aclcheck(qual->opfuncid, GetUserId(), ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
@ -220,7 +223,7 @@ MJExamineQuals(List *qualList, PlanState *parent)
/*
* Lookup the comparison operators that go with the mergejoinable
* top-level operator. (This will elog if the operator isn't
* top-level operator. (This will elog if the operator isn't
* mergejoinable, which would be the planner's mistake.)
*/
op_mergejoin_crossops(qual->opno,
@ -232,13 +235,12 @@ MJExamineQuals(List *qualList, PlanState *parent)
clause->cmpstrategy = MERGEFUNC_LT;
/*
* Look for a btree opclass including all three operators.
* This is much like SelectSortFunction except we insist on
* matching all the operators provided, and it can be a cross-type
* opclass.
* Look for a btree opclass including all three operators. This is
* much like SelectSortFunction except we insist on matching all the
* operators provided, and it can be a cross-type opclass.
*
* XXX for now, insist on forward sort so that NULLs can be counted
* on to be high.
* XXX for now, insist on forward sort so that NULLs can be counted on to
* be high.
*/
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(qual->opno),
@ -255,13 +257,13 @@ MJExamineQuals(List *qualList, PlanState *parent)
if (!opclass_is_btree(opcid))
continue;
if (get_op_opclass_strategy(ltop, opcid) == BTLessStrategyNumber &&
get_op_opclass_strategy(gtop, opcid) == BTGreaterStrategyNumber)
get_op_opclass_strategy(gtop, opcid) == BTGreaterStrategyNumber)
{
clause->cmpstrategy = MERGEFUNC_CMP;
ltproc = get_opclass_proc(opcid, aform->amopsubtype,
BTORDER_PROC);
Assert(RegProcedureIsValid(ltproc));
break; /* done looking */
break; /* done looking */
}
}
@ -325,7 +327,7 @@ MJEvalOuterValues(MergeJoinState *mergestate)
/*
* MJEvalInnerValues
*
* Same as above, but for the inner tuple. Here, we have to be prepared
* Same as above, but for the inner tuple. Here, we have to be prepared
* to load data from either the true current inner, or the marked inner,
* so caller must tell us which slot to load from.
*/
@ -379,8 +381,8 @@ MJCompare(MergeJoinState *mergestate)
FunctionCallInfoData fcinfo;
/*
* Call the comparison functions in short-lived context, in case they
* leak memory.
* Call the comparison functions in short-lived context, in case they leak
* memory.
*/
ResetExprContext(econtext);
@ -394,11 +396,11 @@ MJCompare(MergeJoinState *mergestate)
/*
* Deal with null inputs. We treat NULL as sorting after non-NULL.
*
* If both inputs are NULL, and the comparison function isn't
* strict, then we call it and check for a true result (this allows
* operators that behave like IS NOT DISTINCT to be mergejoinable).
* If the function is strict or returns false, we temporarily
	 * pretend NULL == NULL and continue checking remaining columns.
* If both inputs are NULL, and the comparison function isn't strict,
* then we call it and check for a true result (this allows operators
* that behave like IS NOT DISTINCT to be mergejoinable). If the
* function is strict or returns false, we temporarily pretend NULL ==
	 * NULL and continue checking remaining columns.
*/
if (clause->lisnull)
{
@ -477,7 +479,8 @@ MJCompare(MergeJoinState *mergestate)
break;
}
}
else /* must be MERGEFUNC_CMP */
else
/* must be MERGEFUNC_CMP */
{
InitFunctionCallInfoData(fcinfo, &(clause->cmpfinfo), 2,
NULL, NULL);
@ -512,10 +515,10 @@ MJCompare(MergeJoinState *mergestate)
}
/*
* If we had any null comparison results or NULL-vs-NULL inputs,
* we do not want to report that the tuples are equal. Instead,
* if result is still 0, change it to +1. This will result in
* advancing the inner side of the join.
* If we had any null comparison results or NULL-vs-NULL inputs, we do not
* want to report that the tuples are equal. Instead, if result is still
* 0, change it to +1. This will result in advancing the inner side of
* the join.
*/
if (nulleqnull && result == 0)
result = 1;
@ -544,8 +547,8 @@ MJFillOuter(MergeJoinState *node)
if (ExecQual(otherqual, econtext, false))
{
/*
* qualification succeeded. now form the desired projection tuple
* and return the slot containing it.
* qualification succeeded. now form the desired projection tuple and
* return the slot containing it.
*/
TupleTableSlot *result;
ExprDoneCond isDone;
@ -583,8 +586,8 @@ MJFillInner(MergeJoinState *node)
if (ExecQual(otherqual, econtext, false))
{
/*
* qualification succeeded. now form the desired projection tuple
* and return the slot containing it.
* qualification succeeded. now form the desired projection tuple and
* return the slot containing it.
*/
TupleTableSlot *result;
ExprDoneCond isDone;
@ -696,9 +699,9 @@ ExecMergeJoin(MergeJoinState *node)
doFillInner = node->mj_FillInner;
/*
* Check to see if we're still projecting out tuples from a previous
* join tuple (because there is a function-returning-set in the
* projection expressions). If so, try to project another one.
* Check to see if we're still projecting out tuples from a previous join
* tuple (because there is a function-returning-set in the projection
* expressions). If so, try to project another one.
*/
if (node->js.ps.ps_TupFromTlist)
{
@ -714,8 +717,8 @@ ExecMergeJoin(MergeJoinState *node)
/*
* Reset per-tuple memory context to free any expression evaluation
* storage allocated in the previous tuple cycle. Note this can't
* happen until we're done projecting out tuples from a join tuple.
* storage allocated in the previous tuple cycle. Note this can't happen
* until we're done projecting out tuples from a join tuple.
*/
ResetExprContext(econtext);
@ -733,10 +736,10 @@ ExecMergeJoin(MergeJoinState *node)
{
/*
* EXEC_MJ_INITIALIZE_OUTER means that this is the first time
* ExecMergeJoin() has been called and so we have to fetch
* the first matchable tuple for both outer and inner subplans.
* We do the outer side in INITIALIZE_OUTER state, then
* advance to INITIALIZE_INNER state for the inner subplan.
* ExecMergeJoin() has been called and so we have to fetch the
* first matchable tuple for both outer and inner subplans. We
* do the outer side in INITIALIZE_OUTER state, then advance
* to INITIALIZE_INNER state for the inner subplan.
*/
case EXEC_MJ_INITIALIZE_OUTER:
MJ_printf("ExecMergeJoin: EXEC_MJ_INITIALIZE_OUTER\n");
@ -749,9 +752,9 @@ ExecMergeJoin(MergeJoinState *node)
if (doFillInner)
{
/*
* Need to emit right-join tuples for remaining
* inner tuples. We set MatchedInner = true to
* force the ENDOUTER state to advance inner.
* Need to emit right-join tuples for remaining inner
* tuples. We set MatchedInner = true to force the
* ENDOUTER state to advance inner.
*/
node->mj_JoinState = EXEC_MJ_ENDOUTER;
node->mj_MatchedInner = true;
@ -797,11 +800,10 @@ ExecMergeJoin(MergeJoinState *node)
if (doFillOuter)
{
/*
* Need to emit left-join tuples for all outer
* tuples, including the one we just fetched. We
* set MatchedOuter = false to force the ENDINNER
* state to emit first tuple before advancing
* outer.
* Need to emit left-join tuples for all outer tuples,
* including the one we just fetched. We set
* MatchedOuter = false to force the ENDINNER state to
* emit first tuple before advancing outer.
*/
node->mj_JoinState = EXEC_MJ_ENDINNER;
node->mj_MatchedOuter = false;
@ -840,9 +842,9 @@ ExecMergeJoin(MergeJoinState *node)
break;
/*
* EXEC_MJ_JOINTUPLES means we have two tuples which
* satisfied the merge clause so we join them and then
* proceed to get the next inner tuple (EXEC_MJ_NEXTINNER).
* EXEC_MJ_JOINTUPLES means we have two tuples which satisfied
* the merge clause so we join them and then proceed to get
* the next inner tuple (EXEC_MJ_NEXTINNER).
*/
case EXEC_MJ_JOINTUPLES:
MJ_printf("ExecMergeJoin: EXEC_MJ_JOINTUPLES\n");
@ -855,18 +857,18 @@ ExecMergeJoin(MergeJoinState *node)
node->mj_JoinState = EXEC_MJ_NEXTINNER;
/*
* Check the extra qual conditions to see if we actually
* want to return this join tuple. If not, can proceed
* with merge. We must distinguish the additional
* joinquals (which must pass to consider the tuples
* "matched" for outer-join logic) from the otherquals
* (which must pass before we actually return the tuple).
* Check the extra qual conditions to see if we actually want
* to return this join tuple. If not, can proceed with merge.
* We must distinguish the additional joinquals (which must
* pass to consider the tuples "matched" for outer-join logic)
* from the otherquals (which must pass before we actually
* return the tuple).
*
* We don't bother with a ResetExprContext here, on the
* assumption that we just did one while checking the
* merge qual. One per tuple should be sufficient. We
* do have to set up the econtext links to the tuples
* for ExecQual to use.
* assumption that we just did one while checking the merge
* qual. One per tuple should be sufficient. We do have to
* set up the econtext links to the tuples for ExecQual to
* use.
*/
outerTupleSlot = node->mj_OuterTupleSlot;
econtext->ecxt_outertuple = outerTupleSlot;
@ -896,8 +898,7 @@ ExecMergeJoin(MergeJoinState *node)
{
/*
* qualification succeeded. now form the desired
* projection tuple and return the slot containing
* it.
* projection tuple and return the slot containing it.
*/
TupleTableSlot *result;
ExprDoneCond isDone;
@ -918,9 +919,9 @@ ExecMergeJoin(MergeJoinState *node)
break;
/*
* EXEC_MJ_NEXTINNER means advance the inner scan to the
* next tuple. If the tuple is not nil, we then proceed to
* test it against the join qualification.
* EXEC_MJ_NEXTINNER means advance the inner scan to the next
* tuple. If the tuple is not nil, we then proceed to test it
* against the join qualification.
*
* Before advancing, we check to see if we must emit an
* outer-join fill tuple for this inner tuple.
@ -932,8 +933,7 @@ ExecMergeJoin(MergeJoinState *node)
{
/*
* Generate a fake join tuple with nulls for the outer
* tuple, and return it if it passes the non-join
* quals.
* tuple, and return it if it passes the non-join quals.
*/
TupleTableSlot *result;
@ -945,12 +945,12 @@ ExecMergeJoin(MergeJoinState *node)
}
/*
* now we get the next inner tuple, if any. If there's
* none, advance to next outer tuple (which may be able
* to join to previously marked tuples).
* now we get the next inner tuple, if any. If there's none,
* advance to next outer tuple (which may be able to join to
* previously marked tuples).
*
* If we find one but it cannot join to anything, stay
* in NEXTINNER state to fetch the next one.
* If we find one but it cannot join to anything, stay in
* NEXTINNER state to fetch the next one.
*/
innerTupleSlot = ExecProcNode(innerPlan);
node->mj_InnerTupleSlot = innerTupleSlot;
@ -969,8 +969,8 @@ ExecMergeJoin(MergeJoinState *node)
/*
* Test the new inner tuple to see if it matches outer.
*
* If they do match, then we join them and move on to the
* next inner tuple (EXEC_MJ_JOINTUPLES).
* If they do match, then we join them and move on to the next
* inner tuple (EXEC_MJ_JOINTUPLES).
*
* If they do not match then advance to next outer tuple.
*/
@ -1013,8 +1013,7 @@ ExecMergeJoin(MergeJoinState *node)
{
/*
* Generate a fake join tuple with nulls for the inner
* tuple, and return it if it passes the non-join
* quals.
* tuple, and return it if it passes the non-join quals.
*/
TupleTableSlot *result;
@ -1034,8 +1033,8 @@ ExecMergeJoin(MergeJoinState *node)
node->mj_MatchedOuter = false;
/*
* if the outer tuple is null then we are done with the
* join, unless we have inner tuples we need to null-fill.
* if the outer tuple is null then we are done with the join,
* unless we have inner tuples we need to null-fill.
*/
if (TupIsNull(outerTupleSlot))
{
@ -1044,8 +1043,8 @@ ExecMergeJoin(MergeJoinState *node)
if (doFillInner && !TupIsNull(innerTupleSlot))
{
/*
* Need to emit right-join tuples for remaining
* inner tuples.
* Need to emit right-join tuples for remaining inner
* tuples.
*/
node->mj_JoinState = EXEC_MJ_ENDOUTER;
break;
@ -1118,26 +1117,25 @@ ExecMergeJoin(MergeJoinState *node)
if (compareResult == 0)
{
/*
* the merge clause matched so now we restore the
* inner scan position to the first mark, and go join
* that tuple (and any following ones) to the new outer.
* the merge clause matched so now we restore the inner
* scan position to the first mark, and go join that tuple
* (and any following ones) to the new outer.
*
* NOTE: we do not need to worry about the MatchedInner
* state for the rescanned inner tuples. We know all
* of them will match this new outer tuple and
* therefore won't be emitted as fill tuples. This
* works *only* because we require the extra joinquals
* to be nil when doing a right or full join ---
* otherwise some of the rescanned tuples might fail
* the extra joinquals.
* NOTE: we do not need to worry about the MatchedInner state
* for the rescanned inner tuples. We know all of them
* will match this new outer tuple and therefore won't be
* emitted as fill tuples. This works *only* because we
* require the extra joinquals to be nil when doing a
* right or full join --- otherwise some of the rescanned
* tuples might fail the extra joinquals.
*/
ExecRestrPos(innerPlan);
/*
* ExecRestrPos probably should give us back a new Slot,
* but since it doesn't, use the marked slot. (The
* previously returned mj_InnerTupleSlot cannot be
* assumed to hold the required tuple.)
* previously returned mj_InnerTupleSlot cannot be assumed
* to hold the required tuple.)
*/
node->mj_InnerTupleSlot = innerTupleSlot;
/* we need not do MJEvalInnerValues again */
@ -1159,7 +1157,7 @@ ExecMergeJoin(MergeJoinState *node)
* which means that all subsequent outer tuples will be
* larger than our marked inner tuples. So we need not
* revisit any of the marked tuples but can proceed to
* look for a match to the current inner. If there's
* look for a match to the current inner. If there's
* no more inners, we are done.
* ----------------
*/
@ -1222,8 +1220,8 @@ ExecMergeJoin(MergeJoinState *node)
/*
* before we advance, make sure the current tuples do not
* satisfy the mergeclauses. If they do, then we update
* the marked tuple position and go join them.
* satisfy the mergeclauses. If they do, then we update the
* marked tuple position and go join them.
*/
compareResult = MJCompare(node);
MJ_DEBUG_COMPARE(compareResult);
@ -1238,7 +1236,8 @@ ExecMergeJoin(MergeJoinState *node)
}
else if (compareResult < 0)
node->mj_JoinState = EXEC_MJ_SKIPOUTER_ADVANCE;
else /* compareResult > 0 */
else
/* compareResult > 0 */
node->mj_JoinState = EXEC_MJ_SKIPINNER_ADVANCE;
break;
@ -1253,8 +1252,7 @@ ExecMergeJoin(MergeJoinState *node)
{
/*
* Generate a fake join tuple with nulls for the inner
* tuple, and return it if it passes the non-join
* quals.
* tuple, and return it if it passes the non-join quals.
*/
TupleTableSlot *result;
@ -1274,8 +1272,8 @@ ExecMergeJoin(MergeJoinState *node)
node->mj_MatchedOuter = false;
/*
* if the outer tuple is null then we are done with the
* join, unless we have inner tuples we need to null-fill.
* if the outer tuple is null then we are done with the join,
* unless we have inner tuples we need to null-fill.
*/
if (TupIsNull(outerTupleSlot))
{
@ -1284,8 +1282,8 @@ ExecMergeJoin(MergeJoinState *node)
if (doFillInner && !TupIsNull(innerTupleSlot))
{
/*
* Need to emit right-join tuples for remaining
* inner tuples.
* Need to emit right-join tuples for remaining inner
* tuples.
*/
node->mj_JoinState = EXEC_MJ_ENDOUTER;
break;
@ -1317,8 +1315,7 @@ ExecMergeJoin(MergeJoinState *node)
{
/*
* Generate a fake join tuple with nulls for the outer
* tuple, and return it if it passes the non-join
* quals.
* tuple, and return it if it passes the non-join quals.
*/
TupleTableSlot *result;
@ -1338,8 +1335,8 @@ ExecMergeJoin(MergeJoinState *node)
node->mj_MatchedInner = false;
/*
* if the inner tuple is null then we are done with the
* join, unless we have outer tuples we need to null-fill.
* if the inner tuple is null then we are done with the join,
* unless we have outer tuples we need to null-fill.
*/
if (TupIsNull(innerTupleSlot))
{
@ -1348,8 +1345,8 @@ ExecMergeJoin(MergeJoinState *node)
if (doFillOuter && !TupIsNull(outerTupleSlot))
{
/*
* Need to emit left-join tuples for remaining
* outer tuples.
* Need to emit left-join tuples for remaining outer
* tuples.
*/
node->mj_JoinState = EXEC_MJ_ENDINNER;
break;
@ -1371,9 +1368,9 @@ ExecMergeJoin(MergeJoinState *node)
break;
/*
* EXEC_MJ_ENDOUTER means we have run out of outer tuples,
* but are doing a right/full join and therefore must
			 * null-fill any remaining unmatched inner tuples.
* EXEC_MJ_ENDOUTER means we have run out of outer tuples, but
* are doing a right/full join and therefore must null-fill
			 * any remaining unmatched inner tuples.
*/
case EXEC_MJ_ENDOUTER:
MJ_printf("ExecMergeJoin: EXEC_MJ_ENDOUTER\n");
@ -1384,8 +1381,7 @@ ExecMergeJoin(MergeJoinState *node)
{
/*
* Generate a fake join tuple with nulls for the outer
* tuple, and return it if it passes the non-join
* quals.
* tuple, and return it if it passes the non-join quals.
*/
TupleTableSlot *result;
@ -1414,9 +1410,9 @@ ExecMergeJoin(MergeJoinState *node)
break;
/*
* EXEC_MJ_ENDINNER means we have run out of inner tuples,
* but are doing a left/full join and therefore must null-
			 * fill any remaining unmatched outer tuples.
* EXEC_MJ_ENDINNER means we have run out of inner tuples, but
			 * are doing a left/full join and therefore must null-fill
			 * any remaining unmatched outer tuples.
*/
case EXEC_MJ_ENDINNER:
MJ_printf("ExecMergeJoin: EXEC_MJ_ENDINNER\n");
@ -1427,8 +1423,7 @@ ExecMergeJoin(MergeJoinState *node)
{
/*
* Generate a fake join tuple with nulls for the inner
* tuple, and return it if it passes the non-join
* quals.
* tuple, and return it if it passes the non-join quals.
*/
TupleTableSlot *result;
@ -1493,10 +1488,9 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate)
ExecAssignExprContext(estate, &mergestate->js.ps);
/*
* we need two additional econtexts in which we can compute the
* join expressions from the left and right input tuples. The
* node's regular econtext won't do because it gets reset too
* often.
* we need two additional econtexts in which we can compute the join
* expressions from the left and right input tuples. The node's regular
* econtext won't do because it gets reset too often.
*/
mergestate->mj_OuterEContext = CreateExprContext(estate);
mergestate->mj_InnerEContext = CreateExprContext(estate);
@ -1546,18 +1540,18 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate)
mergestate->mj_FillInner = false;
mergestate->mj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
ExecGetResultType(innerPlanState(mergestate)));
ExecGetResultType(innerPlanState(mergestate)));
break;
case JOIN_RIGHT:
mergestate->mj_FillOuter = false;
mergestate->mj_FillInner = true;
mergestate->mj_NullOuterTupleSlot =
ExecInitNullTupleSlot(estate,
ExecGetResultType(outerPlanState(mergestate)));
ExecGetResultType(outerPlanState(mergestate)));
/*
* Can't handle right or full join with non-nil extra
* joinclauses. This should have been caught by planner.
* Can't handle right or full join with non-nil extra joinclauses.
* This should have been caught by planner.
*/
if (node->join.joinqual != NIL)
ereport(ERROR,
@ -1569,14 +1563,13 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate)
mergestate->mj_FillInner = true;
mergestate->mj_NullOuterTupleSlot =
ExecInitNullTupleSlot(estate,
ExecGetResultType(outerPlanState(mergestate)));
ExecGetResultType(outerPlanState(mergestate)));
mergestate->mj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
ExecGetResultType(innerPlanState(mergestate)));
ExecGetResultType(innerPlanState(mergestate)));
/*
* Can't handle right or full join with non-nil extra
* joinclauses.
* Can't handle right or full join with non-nil extra joinclauses.
*/
if (node->join.joinqual != NIL)
ereport(ERROR,
@ -1675,8 +1668,8 @@ ExecReScanMergeJoin(MergeJoinState *node, ExprContext *exprCtxt)
node->mj_InnerTupleSlot = NULL;
/*
* if chgParam of subnodes is not null then plans will be re-scanned
* by first ExecProcNode.
* if chgParam of subnodes is not null then plans will be re-scanned by
* first ExecProcNode.
*/
if (((PlanState *) node)->lefttree->chgParam == NULL)
ExecReScan(((PlanState *) node)->lefttree, exprCtxt);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeNestloop.c,v 1.38 2004/12/31 21:59:45 pgsql Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeNestloop.c,v 1.39 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -85,9 +85,9 @@ ExecNestLoop(NestLoopState *node)
econtext->ecxt_outertuple = outerTupleSlot;
/*
* Check to see if we're still projecting out tuples from a previous
* join tuple (because there is a function-returning-set in the
* projection expressions). If so, try to project another one.
* Check to see if we're still projecting out tuples from a previous join
* tuple (because there is a function-returning-set in the projection
* expressions). If so, try to project another one.
*/
if (node->js.ps.ps_TupFromTlist)
{
@ -102,9 +102,9 @@ ExecNestLoop(NestLoopState *node)
}
/*
* If we're doing an IN join, we want to return at most one row per
* outer tuple; so we can stop scanning the inner scan if we matched
* on the previous try.
* If we're doing an IN join, we want to return at most one row per outer
* tuple; so we can stop scanning the inner scan if we matched on the
* previous try.
*/
if (node->js.jointype == JOIN_IN &&
node->nl_MatchedOuter)
@ -112,8 +112,8 @@ ExecNestLoop(NestLoopState *node)
/*
* Reset per-tuple memory context to free any expression evaluation
* storage allocated in the previous tuple cycle. Note this can't
* happen until we're done projecting out tuples from a join tuple.
* storage allocated in the previous tuple cycle. Note this can't happen
* until we're done projecting out tuples from a join tuple.
*/
ResetExprContext(econtext);
@ -135,8 +135,7 @@ ExecNestLoop(NestLoopState *node)
outerTupleSlot = ExecProcNode(outerPlan);
/*
* if there are no more outer tuples, then the join is
* complete..
* if there are no more outer tuples, then the join is complete..
*/
if (TupIsNull(outerTupleSlot))
{
@ -157,8 +156,8 @@ ExecNestLoop(NestLoopState *node)
/*
* The scan key of the inner plan might depend on the current
* outer tuple (e.g. in index scans), that's why we pass our
* expr context.
* outer tuple (e.g. in index scans), that's why we pass our expr
* context.
*/
ExecReScan(innerPlan, econtext);
}
@ -181,10 +180,10 @@ ExecNestLoop(NestLoopState *node)
node->js.jointype == JOIN_LEFT)
{
/*
* We are doing an outer join and there were no join
* matches for this outer tuple. Generate a fake join
* tuple with nulls for the inner tuple, and return it if
* it passes the non-join quals.
* We are doing an outer join and there were no join matches
* for this outer tuple. Generate a fake join tuple with
* nulls for the inner tuple, and return it if it passes the
* non-join quals.
*/
econtext->ecxt_innertuple = node->nl_NullInnerTupleSlot;
@ -193,8 +192,8 @@ ExecNestLoop(NestLoopState *node)
if (ExecQual(otherqual, econtext, false))
{
/*
* qualification was satisfied so we project and
* return the slot containing the result tuple using
* qualification was satisfied so we project and return
* the slot containing the result tuple using
* ExecProject().
*/
TupleTableSlot *result;
@ -220,12 +219,12 @@ ExecNestLoop(NestLoopState *node)
}
/*
* at this point we have a new pair of inner and outer tuples so
* we test the inner and outer tuples to see if they satisfy the
* node's qualification.
* at this point we have a new pair of inner and outer tuples so we
* test the inner and outer tuples to see if they satisfy the node's
* qualification.
*
* Only the joinquals determine MatchedOuter status, but all quals
* must pass to actually return the tuple.
* Only the joinquals determine MatchedOuter status, but all quals must
* pass to actually return the tuple.
*/
ENL1_printf("testing qualification");
@ -236,9 +235,8 @@ ExecNestLoop(NestLoopState *node)
if (otherqual == NIL || ExecQual(otherqual, econtext, false))
{
/*
* qualification was satisfied so we project and return
* the slot containing the result tuple using
* ExecProject().
* qualification was satisfied so we project and return the
* slot containing the result tuple using ExecProject().
*/
TupleTableSlot *result;
ExprDoneCond isDone;
@ -330,7 +328,7 @@ ExecInitNestLoop(NestLoop *node, EState *estate)
case JOIN_LEFT:
nlstate->nl_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
ExecGetResultType(innerPlanState(nlstate)));
ExecGetResultType(innerPlanState(nlstate)));
break;
default:
elog(ERROR, "unrecognized join type: %d",
@ -408,10 +406,9 @@ ExecReScanNestLoop(NestLoopState *node, ExprContext *exprCtxt)
/*
* If outerPlan->chgParam is not null then plan will be automatically
* re-scanned by first ExecProcNode. innerPlan is re-scanned for each
* new outer tuple and MUST NOT be re-scanned from here or you'll get
* troubles from inner index scans when outer Vars are used as
* run-time keys...
* re-scanned by first ExecProcNode. innerPlan is re-scanned for each new
* outer tuple and MUST NOT be re-scanned from here or you'll get troubles
* from inner index scans when outer Vars are used as run-time keys...
*/
if (outerPlan->chgParam == NULL)
ExecReScan(outerPlan, exprCtxt);

View File

@ -38,7 +38,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeResult.c,v 1.31 2005/04/24 15:32:07 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeResult.c,v 1.32 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -92,9 +92,9 @@ ExecResult(ResultState *node)
}
/*
* Check to see if we're still projecting out tuples from a previous
* scan tuple (because there is a function-returning-set in the
* projection expressions). If so, try to project another one.
* Check to see if we're still projecting out tuples from a previous scan
* tuple (because there is a function-returning-set in the projection
* expressions). If so, try to project another one.
*/
if (node->ps.ps_TupFromTlist)
{
@ -107,16 +107,16 @@ ExecResult(ResultState *node)
/*
* Reset per-tuple memory context to free any expression evaluation
* storage allocated in the previous tuple cycle. Note this can't
* happen until we're done projecting out tuples from a scan tuple.
* storage allocated in the previous tuple cycle. Note this can't happen
* until we're done projecting out tuples from a scan tuple.
*/
ResetExprContext(econtext);
/*
* if rs_done is true then it means that we were asked to return a
* constant tuple and we already did the last time ExecResult() was
* called, OR that we failed the constant qual check. Either way, now
* we are through.
* called, OR that we failed the constant qual check. Either way, now we
* are through.
*/
while (!node->rs_done)
{
@ -125,8 +125,7 @@ ExecResult(ResultState *node)
if (outerPlan != NULL)
{
/*
* retrieve tuples from the outer plan until there are no
* more.
* retrieve tuples from the outer plan until there are no more.
*/
outerTupleSlot = ExecProcNode(outerPlan);
@ -136,8 +135,7 @@ ExecResult(ResultState *node)
node->ps.ps_OuterTupleSlot = outerTupleSlot;
/*
* XXX gross hack. use outer tuple as scan tuple for
* projection
* XXX gross hack. use outer tuple as scan tuple for projection
*/
econtext->ecxt_outertuple = outerTupleSlot;
econtext->ecxt_scantuple = outerTupleSlot;
@ -145,16 +143,16 @@ ExecResult(ResultState *node)
else
{
/*
* if we don't have an outer plan, then we are just generating
* the results from a constant target list. Do it only once.
* if we don't have an outer plan, then we are just generating the
* results from a constant target list. Do it only once.
*/
node->rs_done = true;
}
/*
* form the result tuple using ExecProject(), and return it ---
* unless the projection produces an empty set, in which case we
* must loop back to see if there are more outerPlan tuples.
* form the result tuple using ExecProject(), and return it --- unless
* the projection produces an empty set, in which case we must loop
* back to see if there are more outerPlan tuples.
*/
resultSlot = ExecProject(node->ps.ps_ProjInfo, &isDone);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.53 2005/05/15 21:19:55 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.54 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -62,11 +62,11 @@ SeqNext(SeqScanState *node)
slot = node->ss_ScanTupleSlot;
/*
* Clear any reference to the previously returned tuple. The idea
* here is to not have the tuple slot be the last holder of a pin on
* that tuple's buffer; if it is, we'll need a separate visit to the
* bufmgr to release the buffer. By clearing here, we get to have the
* release done by ReleaseAndReadBuffer inside heap_getnext.
* Clear any reference to the previously returned tuple. The idea here is
* to not have the tuple slot be the last holder of a pin on that tuple's
* buffer; if it is, we'll need a separate visit to the bufmgr to release
* the buffer. By clearing here, we get to have the release done by
* ReleaseAndReadBuffer inside heap_getnext.
*/
ExecClearTuple(slot);
@ -87,8 +87,8 @@ SeqNext(SeqScanState *node)
/*
* Note that unlike IndexScan, SeqScan never use keys in
* heap_beginscan (and this is very bad) - so, here we do not
* check are keys ok or not.
* heap_beginscan (and this is very bad) - so, here we do not check
* are keys ok or not.
*/
/* Flag for the next call that no more tuples */
@ -102,20 +102,19 @@ SeqNext(SeqScanState *node)
tuple = heap_getnext(scandesc, direction);
/*
* save the tuple and the buffer returned to us by the access methods
* in our scan tuple slot and return the slot. Note: we pass 'false'
* because tuples returned by heap_getnext() are pointers onto disk
* pages and were not created with palloc() and so should not be
* pfree()'d. Note also that ExecStoreTuple will increment the
* refcount of the buffer; the refcount will not be dropped until the
* tuple table slot is cleared.
* save the tuple and the buffer returned to us by the access methods in
* our scan tuple slot and return the slot. Note: we pass 'false' because
* tuples returned by heap_getnext() are pointers onto disk pages and were
* not created with palloc() and so should not be pfree()'d. Note also
* that ExecStoreTuple will increment the refcount of the buffer; the
* refcount will not be dropped until the tuple table slot is cleared.
*/
if (tuple)
ExecStoreTuple(tuple, /* tuple to store */
slot, /* slot to store in */
scandesc->rs_cbuf, /* buffer associated with
* this tuple */
false); /* don't pfree this pointer */
ExecStoreTuple(tuple, /* tuple to store */
slot, /* slot to store in */
scandesc->rs_cbuf, /* buffer associated with this
* tuple */
false); /* don't pfree this pointer */
return slot;
}
@ -157,8 +156,8 @@ InitScanRelation(SeqScanState *node, EState *estate)
HeapScanDesc currentScanDesc;
/*
* get the relation object id from the relid'th entry in the range
* table, open that relation and initialize the scan state.
* get the relation object id from the relid'th entry in the range table,
* open that relation and initialize the scan state.
*
* We acquire AccessShareLock for the duration of the scan.
*/
@ -191,8 +190,8 @@ ExecInitSeqScan(SeqScan *node, EState *estate)
SeqScanState *scanstate;
/*
* Once upon a time it was possible to have an outerPlan of a SeqScan,
* but not any more.
* Once upon a time it was possible to have an outerPlan of a SeqScan, but
* not any more.
*/
Assert(outerPlan(node) == NULL);
Assert(innerPlan(node) == NULL);
@ -291,9 +290,8 @@ ExecEndSeqScan(SeqScanState *node)
* close the heap relation.
*
* Currently, we do not release the AccessShareLock acquired by
* InitScanRelation. This lock should be held till end of
* transaction. (There is a faction that considers this too much
* locking, however.)
* InitScanRelation. This lock should be held till end of transaction.
* (There is a faction that considers this too much locking, however.)
*/
heap_close(relation, NoLock);
}
@ -359,10 +357,10 @@ ExecSeqRestrPos(SeqScanState *node)
HeapScanDesc scan = node->ss_currentScanDesc;
/*
* Clear any reference to the previously returned tuple. This is
* needed because the slot is simply pointing at scan->rs_cbuf, which
* heap_restrpos will change; we'd have an internally inconsistent
* slot if we didn't do this.
* Clear any reference to the previously returned tuple. This is needed
* because the slot is simply pointing at scan->rs_cbuf, which
* heap_restrpos will change; we'd have an internally inconsistent slot if
* we didn't do this.
*/
ExecClearTuple(node->ss_ScanTupleSlot);

View File

@ -21,7 +21,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeSetOp.c,v 1.17 2005/05/06 17:24:54 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeSetOp.c,v 1.18 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -58,8 +58,8 @@ ExecSetOp(SetOpState *node)
resultTupleSlot = node->ps.ps_ResultTupleSlot;
/*
* If the previously-returned tuple needs to be returned more than
* once, keep returning it.
* If the previously-returned tuple needs to be returned more than once,
* keep returning it.
*/
if (node->numOutput > 0)
{
@ -71,9 +71,9 @@ ExecSetOp(SetOpState *node)
ExecClearTuple(resultTupleSlot);
/*
* Absorb groups of duplicate tuples, counting them, and saving the
* first of each group as a possible return value. At the end of each
* group, decide whether to return anything.
* Absorb groups of duplicate tuples, counting them, and saving the first
* of each group as a possible return value. At the end of each group,
* decide whether to return anything.
*
* We assume that the tuples arrive in sorted order so we can detect
* duplicates easily.
@ -177,8 +177,8 @@ ExecSetOp(SetOpState *node)
else
{
/*
* Current tuple is member of same group as resultTuple. Count
* it in the appropriate counter.
* Current tuple is member of same group as resultTuple. Count it
* in the appropriate counter.
*/
int flag;
bool isNull;
@ -232,8 +232,8 @@ ExecInitSetOp(SetOp *node, EState *estate)
* Miscellaneous initialization
*
* SetOp nodes have no ExprContext initialization because they never call
* ExecQual or ExecProject. But they do need a per-tuple memory
* context anyway for calling execTuplesMatch.
* ExecQual or ExecProject. But they do need a per-tuple memory context
* anyway for calling execTuplesMatch.
*/
setopstate->tempContext =
AllocSetContextCreate(CurrentMemoryContext,
@ -255,8 +255,8 @@ ExecInitSetOp(SetOp *node, EState *estate)
outerPlanState(setopstate) = ExecInitNode(outerPlan(node), estate);
/*
* setop nodes do no projections, so initialize projection info for
* this node appropriately
* setop nodes do no projections, so initialize projection info for this
* node appropriately
*/
ExecAssignResultTypeFromOuterPlan(&setopstate->ps);
setopstate->ps.ps_ProjInfo = NULL;

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeSort.c,v 1.50 2005/03/16 21:38:08 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeSort.c,v 1.51 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -56,9 +56,8 @@ ExecSort(SortState *node)
tuplesortstate = (Tuplesortstate *) node->tuplesortstate;
/*
* If first time through, read all tuples from outer plan and pass
* them to tuplesort.c. Subsequent calls just fetch tuples from
* tuplesort.
* If first time through, read all tuples from outer plan and pass them to
* tuplesort.c. Subsequent calls just fetch tuples from tuplesort.
*/
if (!node->sort_Done)
@ -71,8 +70,8 @@ ExecSort(SortState *node)
"sorting subplan");
/*
* Want to scan subplan in the forward direction while creating
* the sorted data.
* Want to scan subplan in the forward direction while creating the
* sorted data.
*/
estate->es_direction = ForwardScanDirection;
@ -191,8 +190,8 @@ ExecInitSort(Sort *node, EState *estate)
outerPlanState(sortstate) = ExecInitNode(outerPlan(node), estate);
/*
* initialize tuple type. no need to initialize projection info
* because this node doesn't do projections.
* initialize tuple type. no need to initialize projection info because
* this node doesn't do projections.
*/
ExecAssignResultTypeFromOuterPlan(&sortstate->ss.ps);
ExecAssignScanTypeFromOuterPlan(&sortstate->ss);
@ -286,9 +285,9 @@ void
ExecReScanSort(SortState *node, ExprContext *exprCtxt)
{
/*
* If we haven't sorted yet, just return. If outerplan' chgParam is
* not NULL then it will be re-scanned by ExecProcNode, else - no
* reason to re-scan it at all.
* If we haven't sorted yet, just return. If outerplan' chgParam is not
* NULL then it will be re-scanned by ExecProcNode, else - no reason to
* re-scan it at all.
*/
if (!node->sort_Done)
return;
@ -296,8 +295,8 @@ ExecReScanSort(SortState *node, ExprContext *exprCtxt)
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
/*
* If subnode is to be rescanned then we forget previous sort results;
* we have to re-read the subplan and re-sort.
* If subnode is to be rescanned then we forget previous sort results; we
* have to re-read the subplan and re-sort.
*
* Otherwise we can just rewind and rescan the sorted output.
*/

View File

@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.69 2005/05/06 17:24:54 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.70 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -86,15 +86,15 @@ ExecHashSubPlan(SubPlanState *node,
elog(ERROR, "hashed subplan with direct correlation not supported");
/*
* If first time through or we need to rescan the subplan, build the
* hash table.
* If first time through or we need to rescan the subplan, build the hash
* table.
*/
if (node->hashtable == NULL || planstate->chgParam != NULL)
buildSubPlanHash(node);
/*
* The result for an empty subplan is always FALSE; no need to
* evaluate lefthand side.
* The result for an empty subplan is always FALSE; no need to evaluate
* lefthand side.
*/
*isNull = false;
if (!node->havehashrows && !node->havenullrows)
@ -108,34 +108,32 @@ ExecHashSubPlan(SubPlanState *node,
slot = ExecProject(node->projLeft, NULL);
/*
* Note: because we are typically called in a per-tuple context, we
* have to explicitly clear the projected tuple before returning.
* Otherwise, we'll have a double-free situation: the per-tuple
* context will probably be reset before we're called again, and then
* the tuple slot will think it still needs to free the tuple.
* Note: because we are typically called in a per-tuple context, we have
* to explicitly clear the projected tuple before returning. Otherwise,
* we'll have a double-free situation: the per-tuple context will probably
* be reset before we're called again, and then the tuple slot will think
* it still needs to free the tuple.
*/
/*
* Since the hashtable routines will use innerecontext's per-tuple
* memory as working memory, be sure to reset it for each tuple.
* Since the hashtable routines will use innerecontext's per-tuple memory
* as working memory, be sure to reset it for each tuple.
*/
ResetExprContext(innerecontext);
/*
* If the LHS is all non-null, probe for an exact match in the main
* hash table. If we find one, the result is TRUE. Otherwise, scan
* the partly-null table to see if there are any rows that aren't
* provably unequal to the LHS; if so, the result is UNKNOWN. (We
* skip that part if we don't care about UNKNOWN.) Otherwise, the
* result is FALSE.
* If the LHS is all non-null, probe for an exact match in the main hash
* table. If we find one, the result is TRUE. Otherwise, scan the
* partly-null table to see if there are any rows that aren't provably
* unequal to the LHS; if so, the result is UNKNOWN. (We skip that part
* if we don't care about UNKNOWN.) Otherwise, the result is FALSE.
*
* Note: the reason we can avoid a full scan of the main hash table is
* that the combining operators are assumed never to yield NULL when
* both inputs are non-null. If they were to do so, we might need to
* produce UNKNOWN instead of FALSE because of an UNKNOWN result in
* comparing the LHS to some main-table entry --- which is a
* comparison we will not even make, unless there's a chance match of
* hash keys.
* Note: the reason we can avoid a full scan of the main hash table is that
* the combining operators are assumed never to yield NULL when both
* inputs are non-null. If they were to do so, we might need to produce
* UNKNOWN instead of FALSE because of an UNKNOWN result in comparing the
* LHS to some main-table entry --- which is a comparison we will not even
* make, unless there's a chance match of hash keys.
*/
if (slotNoNulls(slot))
{
@ -157,14 +155,14 @@ ExecHashSubPlan(SubPlanState *node,
}
/*
* When the LHS is partly or wholly NULL, we can never return TRUE. If
* we don't care about UNKNOWN, just return FALSE. Otherwise, if the
* LHS is wholly NULL, immediately return UNKNOWN. (Since the
* combining operators are strict, the result could only be FALSE if
* the sub-select were empty, but we already handled that case.)
* Otherwise, we must scan both the main and partly-null tables to see
* if there are any rows that aren't provably unequal to the LHS; if
* so, the result is UNKNOWN. Otherwise, the result is FALSE.
* When the LHS is partly or wholly NULL, we can never return TRUE. If we
* don't care about UNKNOWN, just return FALSE. Otherwise, if the LHS is
* wholly NULL, immediately return UNKNOWN. (Since the combining
* operators are strict, the result could only be FALSE if the sub-select
* were empty, but we already handled that case.) Otherwise, we must scan
* both the main and partly-null tables to see if there are any rows that
* aren't provably unequal to the LHS; if so, the result is UNKNOWN.
* Otherwise, the result is FALSE.
*/
if (node->hashnulls == NULL)
{
@ -217,9 +215,9 @@ ExecScanSubPlan(SubPlanState *node,
ArrayBuildState *astate = NULL;
/*
* We are probably in a short-lived expression-evaluation context.
* Switch to the child plan's per-query context for manipulating its
* chgParam, calling ExecProcNode on it, etc.
* We are probably in a short-lived expression-evaluation context. Switch
* to the child plan's per-query context for manipulating its chgParam,
* calling ExecProcNode on it, etc.
*/
oldcontext = MemoryContextSwitchTo(node->sub_estate->es_query_cxt);
@ -245,24 +243,23 @@ ExecScanSubPlan(SubPlanState *node,
ExecReScan(planstate, NULL);
/*
* For all sublink types except EXPR_SUBLINK and ARRAY_SUBLINK, the
* result is boolean as are the results of the combining operators. We
* combine results within a tuple (if there are multiple columns)
* using OR semantics if "useOr" is true, AND semantics if not. We
* then combine results across tuples (if the subplan produces more
* than one) using OR semantics for ANY_SUBLINK or AND semantics for
* ALL_SUBLINK. (MULTIEXPR_SUBLINK doesn't allow multiple tuples from
* the subplan.) NULL results from the combining operators are handled
* according to the usual SQL semantics for OR and AND. The result
* for no input tuples is FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK,
* NULL for MULTIEXPR_SUBLINK.
* For all sublink types except EXPR_SUBLINK and ARRAY_SUBLINK, the result
* is boolean as are the results of the combining operators. We combine
* results within a tuple (if there are multiple columns) using OR
* semantics if "useOr" is true, AND semantics if not. We then combine
* results across tuples (if the subplan produces more than one) using OR
* semantics for ANY_SUBLINK or AND semantics for ALL_SUBLINK.
* (MULTIEXPR_SUBLINK doesn't allow multiple tuples from the subplan.)
* NULL results from the combining operators are handled according to the
* usual SQL semantics for OR and AND. The result for no input tuples is
* FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK, NULL for
* MULTIEXPR_SUBLINK.
*
* For EXPR_SUBLINK we require the subplan to produce no more than one
* tuple, else an error is raised. For ARRAY_SUBLINK we allow the
* subplan to produce more than one tuple. In either case, if zero
* tuples are produced, we return NULL. Assuming we get a tuple, we
* just use its first column (there can be only one non-junk column in
* this case).
* For EXPR_SUBLINK we require the subplan to produce no more than one tuple,
* else an error is raised. For ARRAY_SUBLINK we allow the subplan to
* produce more than one tuple. In either case, if zero tuples are
* produced, we return NULL. Assuming we get a tuple, we just use its
* first column (there can be only one non-junk column in this case).
*/
result = BoolGetDatum(subLinkType == ALL_SUBLINK);
*isNull = false;
@ -294,12 +291,12 @@ ExecScanSubPlan(SubPlanState *node,
found = true;
/*
* We need to copy the subplan's tuple in case the result is
* of pass-by-ref type --- our return value will point into
* this copied tuple! Can't use the subplan's instance of the
* tuple since it won't still be valid after next
* ExecProcNode() call. node->curTuple keeps track of the
* copied tuple for eventual freeing.
* We need to copy the subplan's tuple in case the result is of
* pass-by-ref type --- our return value will point into this
* copied tuple! Can't use the subplan's instance of the tuple
* since it won't still be valid after next ExecProcNode() call.
* node->curTuple keeps track of the copied tuple for eventual
* freeing.
*/
MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
if (node->curTuple)
@ -350,8 +347,7 @@ ExecScanSubPlan(SubPlanState *node,
bool expnull;
/*
* Load up the Param representing this column of the
* sub-select.
* Load up the Param representing this column of the sub-select.
*/
prmdata = &(econtext->ecxt_param_exec_vals[paramid]);
Assert(prmdata->execPlan == NULL);
@ -436,8 +432,8 @@ ExecScanSubPlan(SubPlanState *node,
{
/*
* deal with empty subplan result. result/isNull were previously
* initialized correctly for all sublink types except EXPR, ARRAY,
* and MULTIEXPR; for those, return NULL.
* initialized correctly for all sublink types except EXPR, ARRAY, and
* MULTIEXPR; for those, return NULL.
*/
if (subLinkType == EXPR_SUBLINK ||
subLinkType == ARRAY_SUBLINK ||
@ -478,19 +474,19 @@ buildSubPlanHash(SubPlanState *node)
Assert(!subplan->useOr);
/*
* If we already had any hash tables, destroy 'em; then create empty
* hash table(s).
* If we already had any hash tables, destroy 'em; then create empty hash
* table(s).
*
* If we need to distinguish accurately between FALSE and UNKNOWN (i.e.,
* NULL) results of the IN operation, then we have to store subplan
* output rows that are partly or wholly NULL. We store such rows in
* a separate hash table that we expect will be much smaller than the
* main table. (We can use hashing to eliminate partly-null rows that
* are not distinct. We keep them separate to minimize the cost of
* the inevitable full-table searches; see findPartialMatch.)
* NULL) results of the IN operation, then we have to store subplan output
* rows that are partly or wholly NULL. We store such rows in a separate
* hash table that we expect will be much smaller than the main table.
* (We can use hashing to eliminate partly-null rows that are not
* distinct. We keep them separate to minimize the cost of the inevitable
* full-table searches; see findPartialMatch.)
*
* If it's not necessary to distinguish FALSE and UNKNOWN, then we don't
* need to store subplan output rows that contain NULL.
* If it's not necessary to distinguish FALSE and UNKNOWN, then we don't need
* to store subplan output rows that contain NULL.
*/
MemoryContextReset(node->tablecxt);
node->hashtable = NULL;
@ -532,9 +528,8 @@ buildSubPlanHash(SubPlanState *node)
}
/*
* We are probably in a short-lived expression-evaluation context.
* Switch to the child plan's per-query context for calling
* ExecProcNode.
* We are probably in a short-lived expression-evaluation context. Switch
* to the child plan's per-query context for calling ExecProcNode.
*/
oldcontext = MemoryContextSwitchTo(node->sub_estate->es_query_cxt);
@ -544,9 +539,8 @@ buildSubPlanHash(SubPlanState *node)
ExecReScan(planstate, NULL);
/*
* Scan the subplan and load the hash table(s). Note that when there
* are duplicate rows coming out of the sub-select, only one copy is
* stored.
* Scan the subplan and load the hash table(s). Note that when there are
* duplicate rows coming out of the sub-select, only one copy is stored.
*/
for (slot = ExecProcNode(planstate);
!TupIsNull(slot);
@ -557,8 +551,8 @@ buildSubPlanHash(SubPlanState *node)
bool isnew;
/*
* Load up the Params representing the raw sub-select outputs,
* then form the projection tuple to store in the hashtable.
* Load up the Params representing the raw sub-select outputs, then
* form the projection tuple to store in the hashtable.
*/
foreach(plst, subplan->paramIds)
{
@ -588,18 +582,18 @@ buildSubPlanHash(SubPlanState *node)
}
/*
* Reset innerecontext after each inner tuple to free any memory
* used in hash computation or comparison routines.
* Reset innerecontext after each inner tuple to free any memory used
* in hash computation or comparison routines.
*/
ResetExprContext(innerecontext);
}
/*
* Since the projected tuples are in the sub-query's context and not
* the main context, we'd better clear the tuple slot before there's
* any chance of a reset of the sub-query's context. Else we will
* have the potential for a double free attempt. (XXX possibly
* no longer needed, but can't hurt.)
* Since the projected tuples are in the sub-query's context and not the
* main context, we'd better clear the tuple slot before there's any
* chance of a reset of the sub-query's context. Else we will have the
* potential for a double free attempt. (XXX possibly no longer needed,
* but can't hurt.)
*/
ExecClearTuple(node->projRight->pi_slot);
@ -710,10 +704,10 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
/*
* create an EState for the subplan
*
* The subquery needs its own EState because it has its own rangetable.
* It shares our Param ID space, however. XXX if rangetable access
* were done differently, the subquery could share our EState, which
* would eliminate some thrashing about in this module...
* The subquery needs its own EState because it has its own rangetable. It
* shares our Param ID space, however. XXX if rangetable access were done
* differently, the subquery could share our EState, which would eliminate
* some thrashing about in this module...
*/
sp_estate = CreateExecutorState();
node->sub_estate = sp_estate;
@ -739,13 +733,12 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
MemoryContextSwitchTo(oldcontext);
/*
* If this plan is un-correlated or undirect correlated one and want
* to set params for parent plan then mark parameters as needing
* evaluation.
* If this plan is un-correlated or undirect correlated one and want to
* set params for parent plan then mark parameters as needing evaluation.
*
* Note that in the case of un-correlated subqueries we don't care about
* setting parent->chgParam here: indices take care about it, for
* others - it doesn't matter...
* setting parent->chgParam here: indices take care about it, for others -
* it doesn't matter...
*/
if (subplan->setParam != NIL)
{
@ -761,8 +754,8 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
}
/*
* If we are going to hash the subquery output, initialize relevant
* stuff. (We don't create the hashtable until needed, though.)
* If we are going to hash the subquery output, initialize relevant stuff.
* (We don't create the hashtable until needed, though.)
*/
if (subplan->useHashTable)
{
@ -794,18 +787,17 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
/*
* We use ExecProject to evaluate the lefthand and righthand
* expression lists and form tuples. (You might think that we
* could use the sub-select's output tuples directly, but that is
* not the case if we had to insert any run-time coercions of the
* sub-select's output datatypes; anyway this avoids storing any
* resjunk columns that might be in the sub-select's output.) Run
* through the combining expressions to build tlists for the
* lefthand and righthand sides. We need both the ExprState list
* (for ExecProject) and the underlying parse Exprs (for
* ExecTypeFromTL).
* expression lists and form tuples. (You might think that we could
* use the sub-select's output tuples directly, but that is not the
* case if we had to insert any run-time coercions of the sub-select's
* output datatypes; anyway this avoids storing any resjunk columns
* that might be in the sub-select's output.) Run through the
* combining expressions to build tlists for the lefthand and
* righthand sides. We need both the ExprState list (for ExecProject)
* and the underlying parse Exprs (for ExecTypeFromTL).
*
* We also extract the combining operators themselves to initialize
* the equality and hashing functions for the hash tables.
* We also extract the combining operators themselves to initialize the
* equality and hashing functions for the hash tables.
*/
lefttlist = righttlist = NIL;
leftptlist = rightptlist = NIL;
@ -869,21 +861,21 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
}
/*
* Create a tupletable to hold these tuples. (Note: we never
* bother to free the tupletable explicitly; that's okay because
* it will never store raw disk tuples that might have associated
* buffer pins. The only resource involved is memory, which will
* be cleaned up by freeing the query context.)
* Create a tupletable to hold these tuples. (Note: we never bother
* to free the tupletable explicitly; that's okay because it will
* never store raw disk tuples that might have associated buffer pins.
* The only resource involved is memory, which will be cleaned up by
* freeing the query context.)
*/
tupTable = ExecCreateTupleTable(2);
/*
* Construct tupdescs, slots and projection nodes for left and
* right sides. The lefthand expressions will be evaluated in the
* parent plan node's exprcontext, which we don't have access to
* here. Fortunately we can just pass NULL for now and fill it in
* later (hack alert!). The righthand expressions will be
* evaluated in our own innerecontext.
* Construct tupdescs, slots and projection nodes for left and right
* sides. The lefthand expressions will be evaluated in the parent
* plan node's exprcontext, which we don't have access to here.
* Fortunately we can just pass NULL for now and fill it in later
* (hack alert!). The righthand expressions will be evaluated in our
* own innerecontext.
*/
tupDesc = ExecTypeFromTL(leftptlist, false);
slot = ExecAllocTableSlot(tupTable);
@ -983,11 +975,10 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
found = true;
/*
* We need to copy the subplan's tuple into our own context, in
* case any of the params are pass-by-ref type --- the pointers
* stored in the param structs will point at this copied tuple!
* node->curTuple keeps track of the copied tuple for eventual
* freeing.
* We need to copy the subplan's tuple into our own context, in case
* any of the params are pass-by-ref type --- the pointers stored in
* the param structs will point at this copied tuple! node->curTuple
* keeps track of the copied tuple for eventual freeing.
*/
MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
if (node->curTuple)

View File

@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.26 2005/05/22 22:30:19 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.27 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -62,13 +62,13 @@ SubqueryNext(SubqueryScanState *node)
direction = estate->es_direction;
/*
* We need not support EvalPlanQual here, since we are not scanning a
* real relation.
* We need not support EvalPlanQual here, since we are not scanning a real
* relation.
*/
/*
* Get the next tuple from the sub-query. We have to be careful to
* run it in its appropriate memory context.
* Get the next tuple from the sub-query. We have to be careful to run it
* in its appropriate memory context.
*/
node->sss_SubEState->es_direction = direction;
@ -170,11 +170,10 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate)
ExecCheckRTPerms(rte->subquery->rtable);
/*
* The subquery needs its own EState because it has its own
* rangetable. It shares our Param ID space, however. XXX if
* rangetable access were done differently, the subquery could share
* our EState, which would eliminate some thrashing about in this
* module...
* The subquery needs its own EState because it has its own rangetable. It
* shares our Param ID space, however. XXX if rangetable access were done
* differently, the subquery could share our EState, which would eliminate
* some thrashing about in this module...
*/
sp_estate = CreateExecutorState();
subquerystate->sss_SubEState = sp_estate;
@ -246,7 +245,7 @@ ExecEndSubqueryScan(SubqueryScanState *node)
* clean out the upper tuple table
*/
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
node->ss.ss_ScanTupleSlot = NULL; /* not ours to clear */
node->ss.ss_ScanTupleSlot = NULL; /* not ours to clear */
/*
* close down subquery
@ -278,9 +277,8 @@ ExecSubqueryReScan(SubqueryScanState *node, ExprContext *exprCtxt)
/*
* ExecReScan doesn't know about my subplan, so I have to do
* changed-parameter signaling myself. This is just as well, because
* the subplan has its own memory context in which its chgParam state
* lives.
* changed-parameter signaling myself. This is just as well, because the
* subplan has its own memory context in which its chgParam state lives.
*/
if (node->ss.ps.chgParam != NULL)
UpdateChangedParamSet(node->subplan, node->ss.ps.chgParam);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeTidscan.c,v 1.42 2005/09/22 15:09:51 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeTidscan.c,v 1.43 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -126,8 +126,8 @@ TidNext(TidScanState *node)
return slot; /* return empty slot */
/*
* XXX shouldn't we check here to make sure tuple matches TID
* list? In runtime-key case this is not certain, is it?
* XXX shouldn't we check here to make sure tuple matches TID list? In
* runtime-key case this is not certain, is it?
*/
ExecStoreTuple(estate->es_evTuple[scanrelid - 1],
@ -150,9 +150,9 @@ TidNext(TidScanState *node)
tuple = &(node->tss_htup);
/*
* ok, now that we have what we need, fetch an tid tuple. if scanning
* this tid succeeded then return the appropriate heap tuple.. else
* return NULL.
* ok, now that we have what we need, fetch an tid tuple. if scanning this
* tid succeeded then return the appropriate heap tuple.. else return
* NULL.
*/
bBackward = ScanDirectionIsBackward(direction);
if (bBackward)
@ -184,10 +184,10 @@ TidNext(TidScanState *node)
/*
* store the scanned tuple in the scan tuple slot of the scan
* state. Eventually we will only do this and not return a
* tuple. Note: we pass 'false' because tuples returned by
* amgetnext are pointers onto disk pages and were not created
* with palloc() and so should not be pfree()'d.
* state. Eventually we will only do this and not return a tuple.
* Note: we pass 'false' because tuples returned by amgetnext are
* pointers onto disk pages and were not created with palloc() and
* so should not be pfree()'d.
*/
ExecStoreTuple(tuple, /* tuple to store */
slot, /* slot to store in */
@ -196,8 +196,7 @@ TidNext(TidScanState *node)
/*
* At this point we have an extra pin on the buffer, because
* ExecStoreTuple incremented the pin count. Drop our local
* pin.
* ExecStoreTuple incremented the pin count. Drop our local pin.
*/
ReleaseBuffer(buffer);
@ -229,8 +228,8 @@ TidNext(TidScanState *node)
}
/*
* if we get here it means the tid scan failed so we are at the end of
* the scan..
* if we get here it means the tid scan failed so we are at the end of the
* scan..
*/
return ExecClearTuple(slot);
}
@ -420,8 +419,8 @@ ExecInitTidScan(TidScan *node, EState *estate)
tidstate->tss_TidPtr = -1;
/*
* get the range table and direction information from the execution
* state (these are needed to open the relations).
* get the range table and direction information from the execution state
* (these are needed to open the relations).
*/
rangeTable = estate->es_range_table;

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.47 2005/05/06 17:24:54 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.48 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -56,10 +56,10 @@ ExecUnique(UniqueState *node)
* now loop, returning only non-duplicate tuples. We assume that the
* tuples arrive in sorted order so we can detect duplicates easily.
*
* We return the first tuple from each group of duplicates (or the last
* tuple of each group, when moving backwards). At either end of the
* subplan, clear the result slot so that we correctly return the
* first/last tuple when reversing direction.
* We return the first tuple from each group of duplicates (or the last tuple
* of each group, when moving backwards). At either end of the subplan,
* clear the result slot so that we correctly return the first/last tuple
* when reversing direction.
*/
for (;;)
{
@ -81,9 +81,9 @@ ExecUnique(UniqueState *node)
break;
/*
* Else test if the new tuple and the previously returned tuple
* match. If so then we loop back and fetch another new tuple
* from the subplan.
* Else test if the new tuple and the previously returned tuple match.
* If so then we loop back and fetch another new tuple from the
* subplan.
*/
if (!execTuplesMatch(slot, resultTupleSlot,
plannode->numCols, plannode->uniqColIdx,
@ -93,10 +93,10 @@ ExecUnique(UniqueState *node)
}
/*
* We have a new tuple different from the previous saved tuple (if
* any). Save it and return it. We must copy it because the source
* subplan won't guarantee that this source tuple is still accessible
* after fetching the next source tuple.
* We have a new tuple different from the previous saved tuple (if any).
* Save it and return it. We must copy it because the source subplan
* won't guarantee that this source tuple is still accessible after
* fetching the next source tuple.
*/
return ExecCopySlot(resultTupleSlot, slot);
}
@ -123,9 +123,9 @@ ExecInitUnique(Unique *node, EState *estate)
/*
* Miscellaneous initialization
*
* Unique nodes have no ExprContext initialization because they never
* call ExecQual or ExecProject. But they do need a per-tuple memory
* context anyway for calling execTuplesMatch.
* Unique nodes have no ExprContext initialization because they never call
* ExecQual or ExecProject. But they do need a per-tuple memory context
* anyway for calling execTuplesMatch.
*/
uniquestate->tempContext =
AllocSetContextCreate(CurrentMemoryContext,
@ -147,8 +147,8 @@ ExecInitUnique(Unique *node, EState *estate)
outerPlanState(uniquestate) = ExecInitNode(outerPlan(node), estate);
/*
* unique nodes do no projections, so initialize projection info for
* this node appropriately
* unique nodes do no projections, so initialize projection info for this
* node appropriately
*/
ExecAssignResultTypeFromOuterPlan(&uniquestate->ps);
uniquestate->ps.ps_ProjInfo = NULL;

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.142 2005/10/01 18:43:19 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.143 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -38,11 +38,11 @@ static int _SPI_curid = -1;
static void _SPI_prepare_plan(const char *src, _SPI_plan *plan);
static int _SPI_execute_plan(_SPI_plan *plan,
Datum *Values, const char *Nulls,
Snapshot snapshot, Snapshot crosscheck_snapshot,
bool read_only, long tcount);
Datum *Values, const char *Nulls,
Snapshot snapshot, Snapshot crosscheck_snapshot,
bool read_only, long tcount);
static int _SPI_pquery(QueryDesc *queryDesc, long tcount);
static int _SPI_pquery(QueryDesc *queryDesc, long tcount);
static void _SPI_error_callback(void *arg);
@ -66,8 +66,8 @@ SPI_connect(void)
int newdepth;
/*
* When procedure called by Executor _SPI_curid expected to be equal
* to _SPI_connected
* When procedure called by Executor _SPI_curid expected to be equal to
* _SPI_connected
*/
if (_SPI_curid != _SPI_connected)
return SPI_ERROR_CONNECT;
@ -106,28 +106,28 @@ SPI_connect(void)
_SPI_current->processed = 0;
_SPI_current->lastoid = InvalidOid;
_SPI_current->tuptable = NULL;
_SPI_current->procCxt = NULL; /* in case we fail to create 'em */
_SPI_current->procCxt = NULL; /* in case we fail to create 'em */
_SPI_current->execCxt = NULL;
_SPI_current->connectSubid = GetCurrentSubTransactionId();
/*
* Create memory contexts for this procedure
*
* XXX it would be better to use PortalContext as the parent context, but
* we may not be inside a portal (consider deferred-trigger
* execution). Perhaps CurTransactionContext would do? For now it
* doesn't matter because we clean up explicitly in AtEOSubXact_SPI().
* XXX it would be better to use PortalContext as the parent context, but we
* may not be inside a portal (consider deferred-trigger execution).
* Perhaps CurTransactionContext would do? For now it doesn't matter
* because we clean up explicitly in AtEOSubXact_SPI().
*/
_SPI_current->procCxt = AllocSetContextCreate(TopTransactionContext,
"SPI Proc",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
_SPI_current->execCxt = AllocSetContextCreate(TopTransactionContext,
"SPI Exec",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
/* ... and switch to procedure's context */
_SPI_current->savedcxt = MemoryContextSwitchTo(_SPI_current->procCxt);
@ -161,9 +161,9 @@ SPI_finish(void)
SPI_tuptable = NULL;
/*
* After _SPI_begin_call _SPI_connected == _SPI_curid. Now we are
* closing connection to SPI and returning to upper Executor and so
* _SPI_connected must be equal to _SPI_curid.
* After _SPI_begin_call _SPI_connected == _SPI_curid. Now we are closing
* connection to SPI and returning to upper Executor and so _SPI_connected
* must be equal to _SPI_curid.
*/
_SPI_connected--;
_SPI_curid--;
@ -182,9 +182,9 @@ void
AtEOXact_SPI(bool isCommit)
{
/*
* Note that memory contexts belonging to SPI stack entries will be
* freed automatically, so we can ignore them here. We just need to
* restore our static variables to initial state.
* Note that memory contexts belonging to SPI stack entries will be freed
* automatically, so we can ignore them here. We just need to restore our
* static variables to initial state.
*/
if (isCommit && _SPI_connected != -1)
ereport(WARNING,
@ -236,8 +236,8 @@ AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid)
/*
* Pop the stack entry and reset global variables. Unlike
* SPI_finish(), we don't risk switching to memory contexts that
* might be already gone.
* SPI_finish(), we don't risk switching to memory contexts that might
* be already gone.
*/
_SPI_connected--;
_SPI_curid = _SPI_connected;
@ -560,8 +560,8 @@ SPI_modifytuple(Relation rel, HeapTuple tuple, int natts, int *attnum,
mtuple = heap_formtuple(rel->rd_att, v, n);
/*
* copy the identification info of the old tuple: t_ctid, t_self,
* and OID (if any)
* copy the identification info of the old tuple: t_ctid, t_self, and
* OID (if any)
*/
mtuple->t_data->t_ctid = tuple->t_data->t_ctid;
mtuple->t_self = tuple->t_self;
@ -658,8 +658,8 @@ SPI_getvalue(HeapTuple tuple, TupleDesc tupdesc, int fnumber)
getTypeOutputInfo(typoid, &foutoid, &typisvarlena);
/*
* If we have a toasted datum, forcibly detoast it here to avoid
* memory leakage inside the type's output routine.
* If we have a toasted datum, forcibly detoast it here to avoid memory
* leakage inside the type's output routine.
*/
if (typisvarlena)
val = PointerGetDatum(PG_DETOAST_DATUM(origval));
@ -755,7 +755,7 @@ SPI_getrelname(Relation rel)
char *
SPI_getnspname(Relation rel)
{
return get_namespace_name(RelationGetNamespace(rel));
return get_namespace_name(RelationGetNamespace(rel));
}
void *
@ -939,8 +939,8 @@ SPI_cursor_open(const char *name, void *plan,
portal->cursorOptions |= CURSOR_OPT_NO_SCROLL;
/*
* Set up the snapshot to use. (PortalStart will do CopySnapshot,
* so we skip that here.)
* Set up the snapshot to use. (PortalStart will do CopySnapshot, so we
* skip that here.)
*/
if (read_only)
snapshot = ActiveSnapshot;
@ -1214,7 +1214,7 @@ spi_printtup(TupleTableSlot *slot, DestReceiver *self)
tuptable->free = 256;
tuptable->alloced += tuptable->free;
tuptable->vals = (HeapTuple *) repalloc(tuptable->vals,
tuptable->alloced * sizeof(HeapTuple));
tuptable->alloced * sizeof(HeapTuple));
}
tuptable->vals[tuptable->alloced - tuptable->free] =
@ -1247,9 +1247,9 @@ _SPI_prepare_plan(const char *src, _SPI_plan *plan)
int nargs = plan->nargs;
/*
* Increment CommandCounter to see changes made by now. We must do
* this to be sure of seeing any schema changes made by a just-preceding
* SPI command. (But we don't bother advancing the snapshot, since the
* Increment CommandCounter to see changes made by now. We must do this
* to be sure of seeing any schema changes made by a just-preceding SPI
* command. (But we don't bother advancing the snapshot, since the
* planner generally operates under SnapshotNow rules anyway.)
*/
CommandCounterIncrement();
@ -1270,9 +1270,9 @@ _SPI_prepare_plan(const char *src, _SPI_plan *plan)
/*
* Do parse analysis and rule rewrite for each raw parsetree.
*
* We save the querytrees from each raw parsetree as a separate
* sublist. This allows _SPI_execute_plan() to know where the
* boundaries between original queries fall.
* We save the querytrees from each raw parsetree as a separate sublist.
* This allows _SPI_execute_plan() to know where the boundaries between
* original queries fall.
*/
query_list_list = NIL;
plan_list = NIL;
@ -1316,7 +1316,7 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, const char *Nulls,
volatile int res = 0;
volatile uint32 my_processed = 0;
volatile Oid my_lastoid = InvalidOid;
SPITupleTable * volatile my_tuptable = NULL;
SPITupleTable *volatile my_tuptable = NULL;
Snapshot saveActiveSnapshot;
/* Be sure to restore ActiveSnapshot on error exit */
@ -1407,9 +1407,10 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, const char *Nulls,
if (read_only && !QueryIsReadOnly(queryTree))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
/* translator: %s is a SQL statement name */
errmsg("%s is not allowed in a non-volatile function",
CreateQueryTag(queryTree))));
/* translator: %s is a SQL statement name */
errmsg("%s is not allowed in a non-volatile function",
CreateQueryTag(queryTree))));
/*
* If not read-only mode, advance the command counter before
* each command.
@ -1462,6 +1463,7 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, const char *Nulls,
}
FreeSnapshot(ActiveSnapshot);
ActiveSnapshot = NULL;
/*
* The last canSetTag query sets the auxiliary values returned
* to the caller. Be careful to free any tuptables not
@ -1520,10 +1522,10 @@ _SPI_pquery(QueryDesc *queryDesc, long tcount)
{
case CMD_SELECT:
res = SPI_OK_SELECT;
if (queryDesc->parsetree->into) /* select into table? */
if (queryDesc->parsetree->into) /* select into table? */
{
res = SPI_OK_SELINTO;
queryDesc->dest = None_Receiver; /* don't output results */
queryDesc->dest = None_Receiver; /* don't output results */
}
else if (queryDesc->dest->mydest != SPI)
{
@ -1589,8 +1591,8 @@ _SPI_error_callback(void *arg)
int syntaxerrposition;
/*
* If there is a syntax error position, convert to internal syntax
* error; otherwise treat the query as an item of context stack
* If there is a syntax error position, convert to internal syntax error;
* otherwise treat the query as an item of context stack
*/
syntaxerrposition = geterrposition();
if (syntaxerrposition > 0)
@ -1635,13 +1637,12 @@ _SPI_cursor_operation(Portal portal, bool forward, long count,
dest);
/*
* Think not to combine this store with the preceding function call.
* If the portal contains calls to functions that use SPI, then
* SPI_stack is likely to move around while the portal runs. When
* control returns, _SPI_current will point to the correct stack
* entry... but the pointer may be different than it was beforehand.
* So we must be sure to re-fetch the pointer after the function call
* completes.
* Think not to combine this store with the preceding function call. If
* the portal contains calls to functions that use SPI, then SPI_stack is
* likely to move around while the portal runs. When control returns,
* _SPI_current will point to the correct stack entry... but the pointer
* may be different than it was beforehand. So we must be sure to re-fetch
* the pointer after the function call completes.
*/
_SPI_current->processed = nfetched;
@ -1738,12 +1739,13 @@ _SPI_copy_plan(_SPI_plan *plan, int location)
parentcxt = _SPI_current->procCxt;
else if (location == _SPI_CPLAN_TOPCXT)
parentcxt = TopMemoryContext;
else /* (this case not currently used) */
else
/* (this case not currently used) */
parentcxt = CurrentMemoryContext;
/*
* Create a memory context for the plan. We don't expect the plan to
* be very large, so use smaller-than-default alloc parameters.
* Create a memory context for the plan. We don't expect the plan to be
* very large, so use smaller-than-default alloc parameters.
*/
plancxt = AllocSetContextCreate(parentcxt,
"SPI Plan",