Mirror of https://github.com/postgres/postgres.git
Several changes here, not very related but touching some of the same files.

* Buffer refcount cleanup (per my "progress report" to pghackers, 9/22).
* Add links to backend PROC structs to sinval's array of per-backend info,
  and use these links for routines that need to check the state of all
  backends (rather than the slow, complicated search of the ShmemIndex
  hashtable that was used before).  Add databaseOID to PROC structs.
* Use this to implement an interlock that prevents DESTROY DATABASE of a
  database containing running backends.  (It's a little tricky to prevent
  a concurrently-starting backend from getting in there, since the new
  backend is not able to lock anything at the time it tries to look up
  its database in pg_database.  My solution is to recheck that the DB is
  OK at the end of InitPostgres.  It may not be a 100% solution, but it's
  a lot better than no interlock at all...)
* In ALTER TABLE RENAME, flush buffers for the relation before doing the
  rename of the physical files, to ensure we don't get failures later
  from mdblindwrt().
* Update TRUNCATE patch so that it actually compiles against current
  sources :-(.

You should do "make clean all" after pulling these changes.
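The interlock logic described above can be pictured with a small standalone C model. This is an illustrative sketch only: ModelProc and ModelDatabaseHasBackends are invented stand-ins for the real shared-memory PROC array and its new sinval links, and the real code must also handle locking and the end-of-InitPostgres recheck mentioned above.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef unsigned int Oid;

/* Invented stand-in for the per-backend PROC entries now reachable
 * through sinval's per-backend array; real fields and locking differ. */
typedef struct ModelProc
{
    bool    active;         /* is this backend slot in use? */
    Oid     databaseId;     /* models the databaseOID newly added to PROC */
} ModelProc;

/* Walk the backend array directly, as the new sinval links allow,
 * instead of searching the ShmemIndex hashtable. */
static bool
ModelDatabaseHasBackends(const ModelProc *procs, size_t nprocs, Oid dbId)
{
    for (size_t i = 0; i < nprocs; i++)
        if (procs[i].active && procs[i].databaseId == dbId)
            return true;
    return false;
}

int
main(void)
{
    ModelProc procs[] = {{true, 17}, {false, 0}, {true, 42}};

    /* DESTROY DATABASE would refuse to drop database 42 here... */
    printf("db 42 busy: %d\n", ModelDatabaseHasBackends(procs, 3, 42));
    /* ...but database 99, with no running backends, could be dropped. */
    printf("db 99 busy: %d\n", ModelDatabaseHasBackends(procs, 3, 99));
    return 0;
}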
--- src/backend/executor/execMain.c
+++ src/backend/executor/execMain.c
@@ -26,7 +26,7 @@
  *
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.94 1999/09/18 19:06:47 tgl Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.95 1999/09/24 00:24:23 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -130,16 +130,6 @@ ExecutorStart(QueryDesc *queryDesc, EState *estate)
                         queryDesc->plantree,
                         estate);
 
-    /*
-     * reset buffer refcount. the current refcounts are saved and will be
-     * restored when ExecutorEnd is called
-     *
-     * this makes sure that when ExecutorRun's are called recursively as for
-     * postquel functions, the buffers pinned by one ExecutorRun will not
-     * be unpinned by another ExecutorRun.
-     */
-    BufferRefCountReset(estate->es_refcount);
-
     return result;
 }
 
@@ -385,10 +375,6 @@ ExecutorEnd(QueryDesc *queryDesc, EState *estate)
         pfree(estate->es_param_exec_vals);
         estate->es_param_exec_vals = NULL;
     }
-
-    /* restore saved refcounts. */
-    BufferRefCountRestore(estate->es_refcount);
-
 }
 
 void
@@ -802,7 +788,7 @@ EndPlan(Plan *plan, EState *estate)
     {
         TupleTable  tupleTable = (TupleTable) estate->es_tupleTable;
 
-        ExecDestroyTupleTable(tupleTable, true);    /* was missing last arg */
+        ExecDestroyTupleTable(tupleTable, true);
         estate->es_tupleTable = NULL;
     }
 
@@ -1678,7 +1664,6 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
                                sizeof(ParamExecData));
         epqstate->es_tupleTable =
             ExecCreateTupleTable(estate->es_tupleTable->size);
-        epqstate->es_refcount = estate->es_refcount;
         /* ... rest */
         newepq->plan = copyObject(estate->es_origPlan);
         newepq->free = NULL;
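The two hunks above delete the old save/reset/restore bracket around executor runs. It can go because buffer pins are now owned by individual tuple table slots (see the execTuples.c changes below), so a nested ExecutorRun releases exactly the pins it took. Below is a minimal standalone C model of that ownership discipline; every name in it is invented for illustration and it is not PostgreSQL code.

#include <assert.h>

#define NBUFFERS 16

static int PrivateRefCount[NBUFFERS];   /* models per-backend buffer pin counts */

typedef struct ModelSlot
{
    int     buffer;         /* -1 models InvalidBuffer */
} ModelSlot;

static void
ModelClearTuple(ModelSlot *slot)
{
    if (slot->buffer >= 0)
        PrivateRefCount[slot->buffer]--;    /* matches ReleaseBuffer in ExecClearTuple */
    slot->buffer = -1;
}

static void
ModelStoreTuple(ModelSlot *slot, int buffer)
{
    ModelClearTuple(slot);                  /* drop any old pin first */
    slot->buffer = buffer;
    if (buffer >= 0)
        PrivateRefCount[buffer]++;          /* matches IncrBufferRefCount in ExecStoreTuple */
}

int
main(void)
{
    ModelSlot   outer = {-1},
                inner = {-1};

    ModelStoreTuple(&outer, 3);         /* outer query pins buffer 3 */
    ModelStoreTuple(&inner, 3);         /* nested "ExecutorRun" pins it again */
    ModelClearTuple(&inner);            /* inner cleanup drops only its own pin */
    assert(PrivateRefCount[3] == 1);    /* outer pin survives the nested run */
    ModelClearTuple(&outer);
    assert(PrivateRefCount[3] == 0);    /* no global reset/restore needed */
    return 0;
}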
--- src/backend/executor/execQual.c
+++ src/backend/executor/execQual.c
@@ -7,7 +7,7 @@
  *
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.59 1999/09/18 23:26:37 tgl Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.60 1999/09/24 00:24:23 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -637,7 +637,8 @@ ExecEvalFuncArgs(FunctionCachePtr fcache,
 
     if (!(*argIsDone))
     {
-        Assert(i == 0);
+        if (i != 0)
+            elog(ERROR, "functions can only take sets in their first argument");
         fcache->setArg = (char *) argV[0];
         fcache->hasSetArg = true;
     }
@@ -758,35 +759,48 @@ ExecMakeFunctionResult(Node *node,
     if (fcache->language == SQLlanguageId)
     {
         Datum       result;
-        bool        argDone;
 
         Assert(funcNode);
-        result = postquel_function(funcNode, (char **) argV, isNull, isDone);
 
-        /*
-         * finagle the situation where we are iterating through all
-         * results in a nested dot function (whose argument function
+        /*--------------------
+         * This loop handles the situation where we are iterating through
+         * all results in a nested dot function (whose argument function
          * returns a set of tuples) and the current function finally
-         * finishes. We need to get the next argument in the set and run
-         * the function all over again. This is getting unclean.
+         * finishes. We need to get the next argument in the set and start
+         * the function all over again. We might have to do it more than
+         * once, if the function produces no results for a particular argument.
+         * This is getting unclean.
+         *--------------------
          */
-        if ((*isDone) && (fcache->hasSetArg))
+        for (;;)
         {
+            bool        argDone;
+
+            result = postquel_function(funcNode, (char **) argV,
+                                       isNull, isDone);
+
+            if (! *isDone)
+                break;          /* got a result from current argument */
+            if (! fcache->hasSetArg)
+                break;          /* input not a set, so done */
+
+            /* OK, get the next argument... */
             ExecEvalFuncArgs(fcache, econtext, arguments, argV, &argDone);
 
             if (argDone)
             {
+                /* End of arguments, so reset the setArg flag and say "Done" */
                 fcache->setArg = (char *) NULL;
                 fcache->hasSetArg = false;
                 *isDone = true;
                 result = (Datum) NULL;
+                break;
             }
-            else
-                result = postquel_function(funcNode,
-                                           (char **) argV,
-                                           isNull,
-                                           isDone);
+
+            /* If we reach here, loop around to run the function on the
+             * new argument.
+             */
         }
 
         if (funcisset)
         {
@@ -805,6 +819,7 @@ ExecMakeFunctionResult(Node *node,
             if (*isDone)
                 ((Func *) node)->func_fcache = NULL;
         }
+
         return result;
     }
     else
@@ -1424,8 +1439,10 @@ ExecTargetList(List *targetlist,
 {
     char        nulls_array[64];
     bool        fjNullArray[64];
-    bool       *fjIsNull;
+    bool        itemIsDoneArray[64];
     char       *null_head;
+    bool       *fjIsNull;
+    bool       *itemIsDone;
     List       *tl;
     TargetEntry *tle;
     Node       *expr;
@@ -1434,6 +1451,7 @@ ExecTargetList(List *targetlist,
     Datum       constvalue;
     HeapTuple   newTuple;
     bool        isNull;
+    bool        haveDoneIters;
     static struct tupleDesc NullTupleDesc;      /* we assume this inits to zeroes */
 
     /*
@@ -1457,24 +1475,30 @@ ExecTargetList(List *targetlist,
     /*
     * allocate an array of char's to hold the "null" information only if
     * we have a really large targetlist. otherwise we use the stack.
+     *
+     * We also allocate a bool array that is used to hold fjoin result state,
+     * and another that holds the isDone status for each targetlist item.
     */
     if (nodomains > 64)
     {
         null_head = (char *) palloc(nodomains + 1);
         fjIsNull = (bool *) palloc(nodomains + 1);
+        itemIsDone = (bool *) palloc(nodomains + 1);
     }
     else
     {
         null_head = &nulls_array[0];
         fjIsNull = &fjNullArray[0];
+        itemIsDone = &itemIsDoneArray[0];
     }
 
     /*
     * evaluate all the expressions in the target list
     */
     EV_printf("ExecTargetList: setting target list values\n");
 
-    *isDone = true;
+    *isDone = true;             /* until proven otherwise */
+    haveDoneIters = false;      /* any isDone Iter exprs in tlist? */
 
     foreach(tl, targetlist)
     {
@@ -1493,13 +1517,11 @@ ExecTargetList(List *targetlist,
             expr = tle->expr;
             resdom = tle->resdom;
             resind = resdom->resno - 1;
 
             constvalue = (Datum) ExecEvalExpr(expr,
                                               econtext,
                                               &isNull,
-                                              isDone);
-
-            if ((IsA(expr, Iter)) && (*isDone))
-                return (HeapTuple) NULL;
+                                              &itemIsDone[resind]);
+
             values[resind] = constvalue;
 
@@ -1507,6 +1529,14 @@ ExecTargetList(List *targetlist,
                 null_head[resind] = ' ';
             else
                 null_head[resind] = 'n';
+
+            if (IsA(expr, Iter))
+            {
+                if (itemIsDone[resind])
+                    haveDoneIters = true;
+                else
+                    *isDone = false;    /* we have undone Iters in the list */
+            }
         }
         else
         {
@@ -1518,6 +1548,8 @@ ExecTargetList(List *targetlist,
             DatumPtr    results = fjNode->fj_results;
 
             ExecEvalFjoin(tle, econtext, fjIsNull, isDone);
+
+            /* this is probably wrong: */
             if (*isDone)
                 return (HeapTuple) NULL;
 
@@ -1558,18 +1590,86 @@ ExecTargetList(List *targetlist,
         }
     }
 
+    if (haveDoneIters)
+    {
+        if (*isDone)
+        {
+            /* all Iters are done, so return a null indicating tlist set
+             * expansion is complete.
+             */
+            newTuple = NULL;
+            goto exit;
+        }
+        else
+        {
+            /* We have some done and some undone Iters.  Restart the done ones
+             * so that we can deliver a tuple (if possible).
+             *
+             * XXX this code is a crock, because it only works for Iters at
+             * the top level of tlist expressions, and doesn't even work right
+             * for them: you should get all possible combinations of Iter
+             * results, but you won't unless the numbers of values returned by
+             * each are relatively prime.  Should have a mechanism more like
+             * aggregate functions, where we make a list of all Iters
+             * contained in the tlist and cycle through their values in a
+             * methodical fashion.  To do someday; can't get excited about
+             * fixing a Berkeley feature that's not in SQL92.  (The only
+             * reason we're doing this much is that we have to be sure all
+             * the Iters are run to completion, or their subplan executors
+             * will have unreleased resources, e.g. pinned buffers...)
+             */
+            foreach(tl, targetlist)
+            {
+                tle = lfirst(tl);
+
+                if (tle->resdom != NULL)
+                {
+                    expr = tle->expr;
+                    resdom = tle->resdom;
+                    resind = resdom->resno - 1;
+
+                    if (IsA(expr, Iter) && itemIsDone[resind])
+                    {
+                        constvalue = (Datum) ExecEvalExpr(expr,
+                                                          econtext,
+                                                          &isNull,
+                                                          &itemIsDone[resind]);
+                        if (itemIsDone[resind])
+                        {
+                            /* Oh dear, this Iter is returning an empty set.
+                             * Guess we can't make a tuple after all.
+                             */
+                            *isDone = true;
+                            newTuple = NULL;
+                            goto exit;
+                        }
+
+                        values[resind] = constvalue;
+
+                        if (!isNull)
+                            null_head[resind] = ' ';
+                        else
+                            null_head[resind] = 'n';
+                    }
+                }
+            }
+        }
+    }
+
     /*
     * form the new result tuple (in the "normal" context)
     */
     newTuple = (HeapTuple) heap_formtuple(targettype, values, null_head);
 
+exit:
+
     /*
-     * free the nulls array if we allocated one..
+     * free the status arrays if we palloc'd them
     */
     if (nodomains > 64)
     {
         pfree(null_head);
         pfree(fjIsNull);
+        pfree(itemIsDone);
     }
 
     return newTuple;
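The new for (;;) structure above is the part worth studying: when the function yields nothing for one element of its set-valued argument, the loop advances to the next element and runs the function again, possibly several times. Below is a self-contained toy model of the same control flow; toy_function and setArgs are invented for illustration and have nothing to do with postquel_function's real signature.

#include <stdbool.h>
#include <stdio.h>

/* The set-valued argument: the first two elements yield no results,
 * so the loop must advance more than once before it can return. */
static const int setArgs[] = {2, 4, 3};
#define N_SET_ARGS 3

/* Toy "function": yields one result per odd argument, none for even ones. */
static int
toy_function(int arg, int *ncalls, bool *isDone)
{
    if (arg % 2 == 1 && *ncalls == 0)
    {
        (*ncalls)++;
        *isDone = false;
        return arg * 10;
    }
    *isDone = true;
    return 0;
}

int
main(void)
{
    int     argIndex = 0;
    int     ncalls = 0;
    bool    isDone;

    for (;;)
    {
        int     result = toy_function(setArgs[argIndex], &ncalls, &isDone);

        if (!isDone)
        {
            printf("got %d\n", result); /* got a result from this argument */
            break;
        }
        /* Current argument produced nothing (or ran out): advance the set. */
        if (++argIndex >= N_SET_ARGS)
        {
            printf("set exhausted, no result\n");
            break;
        }
        ncalls = 0;     /* loop around: run the function on the new argument */
    }
    return 0;
}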
--- src/backend/executor/execTuples.c
+++ src/backend/executor/execTuples.c
@@ -14,7 +14,7 @@
  *
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.29 1999/07/17 20:16:57 momjian Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.30 1999/09/24 00:24:23 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -38,9 +38,6 @@
  *   ExecSetSlotDescriptor      - set a slot's tuple descriptor
  *   ExecSetSlotDescriptorIsNew - diddle the slot-desc-is-new flag
  *   ExecSetNewSlotDescriptor   - set a desc and the is-new-flag all at once
- *   ExecSlotBuffer             - return buffer of tuple in slot
- *   ExecSetSlotBuffer          - set the buffer for tuple in slot
- *   ExecIncrSlotBufferRefcnt   - bump the refcnt of the slot buffer(Macro)
  *
  *   SLOT STATUS PREDICATES
  *   TupIsNull                  - true when slot contains no tuple(Macro)
@@ -193,7 +190,7 @@ ExecDestroyTupleTable(TupleTable table, /* tuple table */
                       bool shouldFree)  /* true if we should free slot
                                          * contents */
 {
-    int         next;       /* next avaliable slot */
+    int         next;       /* next available slot */
     TupleTableSlot *array;  /* start of table array */
     int         i;          /* counter */
 
@@ -212,38 +209,27 @@ ExecDestroyTupleTable(TupleTable table, /* tuple table */
 
     /* ----------------
      *  first free all the valid pointers in the tuple array
-     *  if that's what the caller wants..
+     *  and drop refcounts of any referenced buffers,
+     *  if that's what the caller wants.  (There is probably
+     *  no good reason for the caller ever not to want it!)
      *
-     *  Note: we do nothing about the Buffer and Tuple Descriptor's
+     *  Note: we do nothing about the Tuple Descriptor's
      *  we store in the slots.  This may have to change (ex: we should
      *  probably worry about pfreeing tuple descs too) -cim 3/14/91
+     *
+     *  Right now, the handling of tuple pointers and buffer refcounts
+     *  is clean, but the handling of tuple descriptors is NOT; they
+     *  are copied around with wild abandon.  It would take some work
+     *  to make tuple descs pfree'able.  Fortunately, since they're
+     *  normally only made once per scan, it's probably not worth
+     *  worrying about...  tgl 9/21/99
      * ----------------
      */
     if (shouldFree)
+    {
         for (i = 0; i < next; i++)
-        {
-            TupleTableSlot slot;
-            HeapTuple   tuple;
-
-            slot = array[i];
-            tuple = slot.val;
-
-            if (tuple != NULL)
-            {
-                slot.val = (HeapTuple) NULL;
-                if (slot.ttc_shouldFree)
-                {
-                    /* ----------------
-                     * since a tuple may contain a pointer to
-                     * lock information allocated along with the
-                     * tuple, we have to be careful to free any
-                     * rule locks also -cim 1/17/90
-                     * ----------------
-                     */
-                    pfree(tuple);
-                }
-            }
-        }
+            ExecClearTuple(&array[i]);
+    }
 
     /* ----------------
      *  finally free the tuple array and the table itself.
@@ -274,6 +260,7 @@ TupleTableSlot *            /* return: the slot allocated in the tuple
 ExecAllocTableSlot(TupleTable table)
 {
     int         slotnum;    /* new slot number */
+    TupleTableSlot* slot;
 
     /* ----------------
      *  sanity checks
@@ -319,9 +306,18 @@ ExecAllocTableSlot(TupleTable table)
     slotnum = table->next;
     table->next++;
 
-    table->array[slotnum].type = T_TupleTableSlot;
+    slot = &(table->array[slotnum]);
 
-    return &(table->array[slotnum]);
+    /* Make sure the allocated slot is valid (and empty) */
+    slot->type = T_TupleTableSlot;
+    slot->val = (HeapTuple) NULL;
+    slot->ttc_shouldFree = true;
+    slot->ttc_descIsNew = true;
+    slot->ttc_tupleDescriptor = (TupleDesc) NULL;
+    slot->ttc_buffer = InvalidBuffer;
+    slot->ttc_whichplan = -1;
+
+    return slot;
 }
 
 /* ----------------------------------------------------------------
@@ -333,26 +329,49 @@ ExecAllocTableSlot(TupleTable table)
  *      ExecStoreTuple
  *
  *      This function is used to store a tuple into a specified
- *      slot in the tuple table.  Note: the only slots which should
- *      be called with shouldFree == false are those slots used to
- *      store tuples not allocated with pfree().  Currently the
- *      seqscan and indexscan nodes use this for the tuples returned
- *      by amgetattr, which are actually pointers onto disk pages.
+ *      slot in the tuple table.
  *
+ *      tuple:  tuple to store
+ *      slot:   slot to store it in
+ *      buffer: disk buffer if tuple is in a disk page, else InvalidBuffer
+ *      shouldFree: true if ExecClearTuple should pfree() the tuple
+ *                  when done with it
+ *
+ * If 'buffer' is not InvalidBuffer, the tuple table code acquires a pin
+ * on the buffer which is held until the slot is cleared, so that the tuple
+ * won't go away on us.
+ *
+ * shouldFree is normally set 'true' for tuples constructed on-the-fly.
+ * It must always be 'false' for tuples that are stored in disk pages,
+ * since we don't want to try to pfree those.
+ *
+ * Another case where it is 'false' is when the referenced tuple is held
+ * in a tuple table slot belonging to a lower-level executor Proc node.
+ * In this case the lower-level slot retains ownership and responsibility
+ * for eventually releasing the tuple.  When this method is used, we must
+ * be certain that the upper-level Proc node will lose interest in the tuple
+ * sooner than the lower-level one does!  If you're not certain, copy the
+ * lower-level tuple with heap_copytuple and let the upper-level table
+ * slot assume ownership of the copy!
+ *
+ * Return value is just the passed-in slot pointer.
  * --------------------------------
  */
-TupleTableSlot *                /* return: slot passed */
-ExecStoreTuple(HeapTuple tuple, /* tuple to store */
-               TupleTableSlot *slot,    /* slot in which to store tuple */
-               Buffer buffer,   /* buffer associated with tuple */
-               bool shouldFree) /* true if we call pfree() when we gc. */
+TupleTableSlot *
+ExecStoreTuple(HeapTuple tuple,
+               TupleTableSlot *slot,
+               Buffer buffer,
+               bool shouldFree)
 {
     /* ----------------
      *  sanity checks
      * ----------------
      */
     Assert(slot != NULL);
+    /* passing shouldFree=true for a tuple on a disk page is not sane */
+    Assert(BufferIsValid(buffer) ? (!shouldFree) : true);
 
-    /* clear out the slot first */
+    /* clear out any old contents of the slot */
     ExecClearTuple(slot);
 
@@ -364,6 +383,12 @@ ExecStoreTuple(HeapTuple tuple, /* tuple to store */
     slot->ttc_buffer = buffer;
     slot->ttc_shouldFree = shouldFree;
 
+    /* If tuple is on a disk page, keep the page pinned as long as we hold
+     * a pointer into it.
+     */
+    if (BufferIsValid(buffer))
+        IncrBufferRefCount(buffer);
+
     return slot;
 }
 
@@ -395,29 +420,20 @@ ExecClearTuple(TupleTableSlot *slot)    /* slot in which to store tuple */
      * ----------------
      */
     if (slot->ttc_shouldFree && oldtuple != NULL)
-    {
-        /* ----------------
-         * since a tuple may contain a pointer to
-         * lock information allocated along with the
-         * tuple, we have to be careful to free any
-         * rule locks also -cim 1/17/90
-         * ----------------
-         */
         pfree(oldtuple);
-    }
 
-    /* ----------------
-     * store NULL into the specified slot and return the slot.
-     * - also set buffer to InvalidBuffer -cim 3/14/91
-     * ----------------
-     */
     slot->val = (HeapTuple) NULL;
 
-    slot->ttc_shouldFree = true;    /* probably useless code... */
-
+    /* ----------------
+     * Drop the pin on the referenced buffer, if there is one.
+     * ----------------
+     */
+    if (BufferIsValid(slot->ttc_buffer))
+        ReleaseBuffer(slot->ttc_buffer);
+
+    slot->ttc_buffer = InvalidBuffer;
+    slot->ttc_shouldFree = true;
 
     return slot;
 }
@@ -525,41 +541,6 @@ ExecSetNewSlotDescriptor(TupleTableSlot *slot,  /* slot to change */
 
 #endif
 
-/* --------------------------------
- *      ExecSlotBuffer
- *
- *      This function is used to get the tuple descriptor associated
- *      with the slot's tuple.  Be very careful with this as it does not
- *      balance the reference counts.  If the buffer returned is stored
- *      someplace else, then also use ExecIncrSlotBufferRefcnt().
- *
- *      Now a macro in tuptable.h
- * --------------------------------
- */
-
-/* --------------------------------
- *      ExecSetSlotBuffer
- *
- *      This function is used to set the tuple descriptor associated
- *      with the slot's tuple.  Be very careful with this as it does not
- *      balance the reference counts.  If we're using this then we should
- *      also use ExecIncrSlotBufferRefcnt().
- * --------------------------------
- */
-#ifdef NOT_USED
-Buffer                          /* return: old slot buffer */
-ExecSetSlotBuffer(TupleTableSlot *slot, /* slot to change */
-                  Buffer b)     /* tuple descriptor */
-{
-    Buffer      oldb = slot->ttc_buffer;
-
-    slot->ttc_buffer = b;
-
-    return oldb;
-}
-
-#endif
-
 /* ----------------------------------------------------------------
  *              tuple table slot status predicates
  * ----------------------------------------------------------------
@@ -601,12 +582,7 @@ ExecSlotDescriptorIsNew(TupleTableSlot *slot)   /* slot to inspect */
 
 #define INIT_SLOT_ALLOC \
     tupleTable = (TupleTable) estate->es_tupleTable; \
-    slot = ExecAllocTableSlot(tupleTable); \
-    slot->val = (HeapTuple)NULL; \
-    slot->ttc_shouldFree = true; \
-    slot->ttc_tupleDescriptor = (TupleDesc)NULL; \
-    slot->ttc_whichplan = -1;\
-    slot->ttc_descIsNew = true;
+    slot = ExecAllocTableSlot(tupleTable);
 
 /* ----------------
  *      ExecInitResultTupleSlot
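Taken together, the rewritten ExecStoreTuple/ExecClearTuple contract above implies a simple calling pattern for scan nodes. The sketch below is hedged: it uses only the signatures shown in this diff, and StoreScannedTuple is a hypothetical helper invented for illustration, not code from the commit.

/* Hypothetical helper showing the new contract from the caller's side. */
static TupleTableSlot *
StoreScannedTuple(HeapTuple tuple, Buffer buffer, TupleTableSlot *slot)
{
    if (tuple == NULL)
        return ExecClearTuple(slot);    /* also drops any previously held pin */

    /* shouldFree must be false: the tuple points into a disk page, so it
     * was not palloc'd and must not be pfree'd by the slot.  The slot
     * takes its own pin on `buffer` and holds it until cleared. */
    return ExecStoreTuple(tuple, slot, buffer, false);
}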
--- src/backend/executor/nodeAppend.c
+++ src/backend/executor/nodeAppend.c
@@ -7,7 +7,7 @@
  *
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.25 1999/09/18 19:06:48 tgl Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.26 1999/09/24 00:24:23 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -399,12 +399,13 @@ ExecProcAppend(Append *node)
     {
         /* ----------------
          * if the subplan gave us something then place a copy of
-         * whatever we get into our result slot and return it, else..
+         * whatever we get into our result slot and return it.
+         *
+         * Note we rely on the subplan to retain ownership of the
+         * tuple for as long as we need it --- we don't copy it.
          * ----------------
          */
-        return ExecStoreTuple(result->val,
-                              result_slot, result->ttc_buffer, false);
-
+        return ExecStoreTuple(result->val, result_slot, InvalidBuffer, false);
     }
     else
     {
--- src/backend/executor/nodeGroup.c
+++ src/backend/executor/nodeGroup.c
@@ -13,7 +13,7 @@
  * columns. (ie. tuples from the same group are consecutive)
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.29 1999/07/17 20:16:58 momjian Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.30 1999/09/24 00:24:23 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -91,10 +91,12 @@ ExecGroupEveryTuple(Group *node)
         {
             grpstate->grp_useFirstTuple = FALSE;
 
+            /* note we rely on subplan to hold ownership of the tuple
+             * for as long as we need it; we don't copy it.
+             */
             ExecStoreTuple(grpstate->grp_firstTuple,
                            grpstate->csstate.css_ScanTupleSlot,
-                           InvalidBuffer,
-                           false);
+                           InvalidBuffer, false);
         }
         else
         {
@@ -129,10 +131,12 @@ ExecGroupEveryTuple(Group *node)
             }
         }
 
+        /* note we rely on subplan to hold ownership of the tuple
+         * for as long as we need it; we don't copy it.
+         */
         ExecStoreTuple(outerTuple,
                        grpstate->csstate.css_ScanTupleSlot,
-                       outerslot->ttc_buffer,
-                       false);
+                       InvalidBuffer, false);
     }
 
     /* ----------------
@@ -226,10 +230,12 @@ ExecGroupOneTuple(Group *node)
     */
     projInfo = grpstate->csstate.cstate.cs_ProjInfo;
 
+    /* note we rely on subplan to hold ownership of the tuple
+     * for as long as we need it; we don't copy it.
+     */
     ExecStoreTuple(firsttuple,
                    grpstate->csstate.css_ScanTupleSlot,
-                   InvalidBuffer,
-                   false);
+                   InvalidBuffer, false);
     econtext->ecxt_scantuple = grpstate->csstate.css_ScanTupleSlot;
     resultSlot = ExecProject(projInfo, &isDone);
 
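The repeated "rely on subplan to hold ownership" notes above have a flip side, spelled out in the new ExecStoreTuple comments: if a node needs a tuple to outlive the lower-level slot, it must copy it first. A hedged sketch of that case, simplified from the grp_firstTuple handling and not the literal code:

/* Keep a private copy when the tuple must survive the subplan's next cycle;
 * heap_copytuple returns palloc'd memory that the Group state node owns. */
grpstate->grp_firstTuple = heap_copytuple(outerslot->val);

/* Later, store the copy with InvalidBuffer (it is not on a disk page) and
 * shouldFree=false (grpstate, not the slot, still owns the copy). */
ExecStoreTuple(grpstate->grp_firstTuple,
               grpstate->csstate.css_ScanTupleSlot,
               InvalidBuffer,
               false);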
--- src/backend/executor/nodeIndexscan.c
+++ src/backend/executor/nodeIndexscan.c
@@ -7,7 +7,7 @@
  *
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.42 1999/08/12 00:42:43 tgl Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.43 1999/09/24 00:24:23 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -125,14 +125,14 @@ IndexNext(IndexScan *node)
     {
         int         iptr;
 
-        slot->ttc_buffer = InvalidBuffer;
-        slot->ttc_shouldFree = false;
+        ExecClearTuple(slot);
         if (estate->es_evTupleNull[node->scan.scanrelid - 1])
-        {
-            slot->val = NULL;   /* must not free tuple! */
-            return (slot);
-        }
+            return slot;        /* return empty slot */
+
+        /* probably ought to use ExecStoreTuple here... */
+        slot->val = estate->es_evTuple[node->scan.scanrelid - 1];
+        slot->ttc_shouldFree = false;
 
         for (iptr = 0; iptr < numIndices; iptr++)
         {
             scanstate->cstate.cs_ExprContext->ecxt_scantuple = slot;
@@ -142,6 +142,7 @@ IndexNext(IndexScan *node)
         }
         if (iptr == numIndices) /* would not be returned by indices */
             slot->val = NULL;
+
         /* Flag for the next call that no more tuples */
         estate->es_evTupleNull[node->scan.scanrelid - 1] = true;
         return (slot);
@@ -192,7 +193,7 @@ IndexNext(IndexScan *node)
          * the scan state.  Eventually we will only do this and not
          * return a tuple.  Note: we pass 'false' because tuples
          * returned by amgetnext are pointers onto disk pages and
-         * were not created with palloc() and so should not be pfree()'d.
+         * must not be pfree()'d.
          * ----------------
          */
         ExecStoreTuple(tuple,   /* tuple to store */
@@ -200,6 +201,13 @@ IndexNext(IndexScan *node)
                        buffer,  /* buffer associated with tuple */
                        false);  /* don't pfree */
 
+        /*
+         * At this point we have an extra pin on the buffer,
+         * because ExecStoreTuple incremented the pin count.
+         * Drop our local pin.
+         */
+        ReleaseBuffer(buffer);
+
         /*
          * We must check to see if the current tuple would have
         * been matched by an earlier index, so we don't double
@@ -223,8 +231,6 @@ IndexNext(IndexScan *node)
             else
                 ExecClearTuple(slot);
         }
-        if (BufferIsValid(buffer))
-            ReleaseBuffer(buffer);
     }
     if (indexNumber < numIndices)
     {
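The ReleaseBuffer hunk above is the general pattern for scans under the new rules: ExecStoreTuple takes its own pin, so a scan that already holds one must drop its local pin afterwards or the buffer stays pinned. In sketch form (variable names assumed from the surrounding code):

/* The access method returned `tuple`, pinned in `buffer`: */
ExecStoreTuple(tuple, slot, buffer, false); /* slot acquires a second pin */
ReleaseBuffer(buffer);                      /* drop the scan's local pin */
/* Exactly one pin remains, owned by the slot until ExecClearTuple. */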
--- src/backend/executor/nodeMaterial.c
+++ src/backend/executor/nodeMaterial.c
@@ -7,7 +7,7 @@
  *
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.25 1999/07/16 04:58:50 momjian Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.26 1999/09/24 00:24:23 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -31,7 +31,7 @@
  *      ExecMaterial
  *
  *      The first time this is called, ExecMaterial retrieves tuples
- *      this node's outer subplan and inserts them into a temporary
+ *      from this node's outer subplan and inserts them into a temporary
  *      relation.  After this is done, a flag is set indicating that
  *      the subplan has been materialized.  Once the relation is
  *      materialized, the first tuple is then returned.  Successive
@@ -41,7 +41,7 @@
  *      Initial State:
  *
  *      ExecMaterial assumes the temporary relation has been
- *      created and openend by ExecInitMaterial during the prior
+ *      created and opened by ExecInitMaterial during the prior
  *      InitPlan() phase.
  *
  * ----------------------------------------------------------------
@@ -116,18 +116,7 @@ ExecMaterial(Material *node)
             if (TupIsNull(slot))
                 break;
 
-            /*
-             * heap_insert changes something...
-             */
-            if (slot->ttc_buffer != InvalidBuffer)
-                heapTuple = heap_copytuple(slot->val);
-            else
-                heapTuple = slot->val;
-
-            heap_insert(tempRelation, heapTuple);
-
-            if (slot->ttc_buffer != InvalidBuffer)
-                pfree(heapTuple);
+            heap_insert(tempRelation, slot->val);
 
             ExecClearTuple(slot);
         }
@@ -164,7 +153,7 @@ ExecMaterial(Material *node)
 
     /* ----------------
      * at this point we know we have a sorted relation so
-     * we preform a simple scan on it with amgetnext()..
+     * we perform a simple scan on it with amgetnext()..
      * ----------------
      */
     currentScanDesc = matstate->csstate.css_currentScanDesc;
--- src/backend/executor/nodeMergejoin.c
+++ src/backend/executor/nodeMergejoin.c
@@ -7,7 +7,7 @@
  *
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.28 1999/07/16 04:58:50 momjian Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.29 1999/09/24 00:24:23 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -1153,15 +1153,18 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, Plan *parent)
 #define MERGEJOIN_NSLOTS 2
     /* ----------------
      *  tuple table initialization
+     *
+     *  XXX why aren't we getting a tuple table slot in the normal way?
      * ----------------
      */
     ExecInitResultTupleSlot(estate, &mergestate->jstate);
-    mjSlot = (TupleTableSlot *) palloc(sizeof(TupleTableSlot));
+    mjSlot = makeNode(TupleTableSlot);
     mjSlot->val = NULL;
     mjSlot->ttc_shouldFree = true;
-    mjSlot->ttc_tupleDescriptor = NULL;
-    mjSlot->ttc_whichplan = -1;
     mjSlot->ttc_descIsNew = true;
+    mjSlot->ttc_tupleDescriptor = NULL;
+    mjSlot->ttc_buffer = InvalidBuffer;
+    mjSlot->ttc_whichplan = -1;
     mergestate->mj_MarkedTupleSlot = mjSlot;
 
     /* ----------------
@@ -1278,11 +1281,9 @@ ExecReScanMergeJoin(MergeJoin *node, ExprContext *exprCtxt, Plan *parent)
         TupleTableSlot *mjSlot = mergestate->mj_MarkedTupleSlot;
 
         ExecClearTuple(mjSlot);
-        mjSlot->val = NULL;
-        mjSlot->ttc_shouldFree = true;
         mjSlot->ttc_tupleDescriptor = NULL;
-        mjSlot->ttc_whichplan = -1;
         mjSlot->ttc_descIsNew = true;
+        mjSlot->ttc_whichplan = -1;
 
         mergestate->mj_JoinState = EXEC_MJ_INITIALIZE;
 
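A note on the makeNode change above: makeNode returns zeroed storage with the node tag already set, so, under that assumption, only the slot fields with non-zero defaults strictly need explicit initialization. The diff sets every field anyway, which is the safer choice; the minimal version would look roughly like this sketch:

mjSlot = makeNode(TupleTableSlot);  /* zeroed, tag set to T_TupleTableSlot */
mjSlot->ttc_shouldFree = true;      /* non-zero defaults still need setting */
mjSlot->ttc_descIsNew = true;
mjSlot->ttc_buffer = InvalidBuffer; /* assumed non-zero; set it to be safe */
mjSlot->ttc_whichplan = -1;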
--- src/backend/executor/nodeSeqscan.c
+++ src/backend/executor/nodeSeqscan.c
@@ -7,7 +7,7 @@
  *
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.20 1999/07/16 04:58:52 momjian Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.21 1999/09/24 00:24:24 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -74,20 +74,20 @@ SeqNext(SeqScan *node)
     if (estate->es_evTuple != NULL &&
         estate->es_evTuple[node->scanrelid - 1] != NULL)
     {
-        slot->ttc_buffer = InvalidBuffer;
-        slot->ttc_shouldFree = false;
+        ExecClearTuple(slot);
         if (estate->es_evTupleNull[node->scanrelid - 1])
-        {
-            slot->val = NULL;   /* must not free tuple! */
-            return (slot);
-        }
+            return slot;        /* return empty slot */
+
+        /* probably ought to use ExecStoreTuple here... */
+        slot->val = estate->es_evTuple[node->scanrelid - 1];
+        slot->ttc_shouldFree = false;
 
         /*
          * Note that unlike IndexScan, SeqScan never use keys in
-         * heap_beginscan (and this is very bad) - so, here we have not
+         * heap_beginscan (and this is very bad) - so, here we do not
          * check are keys ok or not.
          */
 
         /* Flag for the next call that no more tuples */
         estate->es_evTupleNull[node->scanrelid - 1] = true;
         return (slot);
@@ -104,7 +104,9 @@ SeqNext(SeqScan *node)
      * in our scan tuple slot and return the slot.  Note: we pass 'false'
      * because tuples returned by heap_getnext() are pointers onto
      * disk pages and were not created with palloc() and so should not
-     * be pfree()'d.
+     * be pfree()'d.  Note also that ExecStoreTuple will increment the
+     * refcount of the buffer; the refcount will not be dropped until
+     * the tuple table slot is cleared.
      * ----------------
      */
 
@@ -114,17 +116,6 @@ SeqNext(SeqScan *node)
                    * this tuple */
                    false);      /* don't pfree this pointer */
 
-    /* ----------------
-     * XXX -- mao says:  The sequential scan for heap relations will
-     * automatically unpin the buffer this tuple is on when we cross
-     * a page boundary.  The clearslot code also does this.  We bump
-     * the pin count on the page here, since we actually have two
-     * pointers to it -- one in the scan desc and one in the tuple
-     * table slot.  --mar 20 91
-     * ----------------
-     */
-    ExecIncrSlotBufferRefcnt(slot);
-
     return slot;
 }
@@ -165,8 +165,6 @@ ExecInitSubPlan(SubPlan *node, EState *estate, Plan *parent)
     sp_estate->es_param_exec_vals = estate->es_param_exec_vals;
     sp_estate->es_tupleTable =
         ExecCreateTupleTable(ExecCountSlotsNode(node->plan) + 10);
-    pfree(sp_estate->es_refcount);
-    sp_estate->es_refcount = estate->es_refcount;
     sp_estate->es_snapshot = estate->es_snapshot;
 
     if (!ExecInitNode(node->plan, sp_estate, NULL))