Mirror of https://github.com/postgres/postgres.git
Remove memory leak protection from Gather and Gather Merge nodes.

Before commit 6b65a7fe62, tqueue.c could perform tuple remapping and thus leak memory, which is why commit af33039317 made TupleQueueReaderNext run in a short-lived context. Now, however, tqueue.c has been reduced to a shadow of its former self, and there shouldn't be any chance of leaks any more. Accordingly, remove some tuple copying and memory context manipulation to speed up processing.

Patch by me, reviewed by Amit Kapila. Some testing by Rafia Sabih.

Discussion: http://postgr.es/m/CAA4eK1LSDydwrNjmYSNkfJ3ZivGSWH9SVswh6QpNzsMdj_oOQA@mail.gmail.com
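For context, the "memory context manipulation" the message refers to is the usual executor leak-protection idiom: run a potentially leaky call inside the per-tuple (expression context) memory, so anything it palloc's is discarded wholesale at the next ResetExprContext(). Below is a minimal sketch of that idiom using the same APIs that appear in the hunks that follow; the wrapper function itself is illustrative and not part of PostgreSQL.

#include "postgres.h"

#include "executor/tqueue.h"
#include "nodes/execnodes.h"

/*
 * Sketch only: the leak-protection pattern this commit removes.
 * MemoryContextSwitchTo(), TupleQueueReaderNext(), and the per-tuple
 * expression context are real PostgreSQL facilities; the function
 * read_tuple_leak_protected() is hypothetical.
 */
static HeapTuple
read_tuple_leak_protected(GatherState *node, TupleQueueReader *reader,
                          bool nowait, bool *done)
{
    MemoryContext tupleContext = node->ps.ps_ExprContext->ecxt_per_tuple_memory;
    MemoryContext oldContext;
    HeapTuple   tup;

    /* Any allocations the reader makes land in the short-lived context. */
    oldContext = MemoryContextSwitchTo(tupleContext);
    tup = TupleQueueReaderNext(reader, nowait, done);
    MemoryContextSwitchTo(oldContext);

    /* The next ResetExprContext() on this node frees them all at once. */
    return tup;
}

With tqueue.c no longer doing tuple remapping, TupleQueueReaderNext() allocates essentially nothing beyond the returned tuple itself, so the context switch (and the defensive tuple copies elsewhere) became pure overhead; the hunks below delete them.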
src/backend/executor/nodeGather.c

@@ -131,7 +131,6 @@ static TupleTableSlot *
 ExecGather(PlanState *pstate)
 {
 	GatherState *node = castNode(GatherState, pstate);
-	TupleTableSlot *fslot = node->funnel_slot;
 	TupleTableSlot *slot;
 	ExprContext *econtext;
 
@@ -205,11 +204,8 @@ ExecGather(PlanState *pstate)
 
 	/*
 	 * Reset per-tuple memory context to free any expression evaluation
-	 * storage allocated in the previous tuple cycle.  This will also clear
-	 * any previous tuple returned by a TupleQueueReader; to make sure we
-	 * don't leave a dangling pointer around, clear the working slot first.
+	 * storage allocated in the previous tuple cycle.
 	 */
-	ExecClearTuple(fslot);
 	econtext = node->ps.ps_ExprContext;
 	ResetExprContext(econtext);
 
@@ -258,7 +254,6 @@ gather_getnext(GatherState *gatherstate)
 	PlanState  *outerPlan = outerPlanState(gatherstate);
 	TupleTableSlot *outerTupleSlot;
 	TupleTableSlot *fslot = gatherstate->funnel_slot;
-	MemoryContext tupleContext = gatherstate->ps.ps_ExprContext->ecxt_per_tuple_memory;
 	HeapTuple	tup;
 
 	while (gatherstate->nreaders > 0 || gatherstate->need_to_scan_locally)
@@ -267,12 +262,7 @@ gather_getnext(GatherState *gatherstate)
 
 		if (gatherstate->nreaders > 0)
 		{
-			MemoryContext oldContext;
-
-			/* Run TupleQueueReaders in per-tuple context */
-			oldContext = MemoryContextSwitchTo(tupleContext);
 			tup = gather_readnext(gatherstate);
-			MemoryContextSwitchTo(oldContext);
 
 			if (HeapTupleIsValid(tup))
 			{
@@ -280,7 +270,7 @@ gather_getnext(GatherState *gatherstate)
 							   fslot,	/* slot in which to store the tuple */
 							   InvalidBuffer,	/* buffer associated with this
 												 * tuple */
-							   false);	/* slot should not pfree tuple */
+							   true);	/* pfree tuple when done with it */
 				return fslot;
 			}
 		}
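Taken together, the nodeGather.c hunks above leave the reader branch of gather_getnext() looking roughly like this (reconstructed from the context lines shown; the local-scan branch of the loop is unchanged and omitted here):

		if (gatherstate->nreaders > 0)
		{
			tup = gather_readnext(gatherstate);

			if (HeapTupleIsValid(tup))
			{
				ExecStoreTuple(tup, /* tuple to store */
							   fslot,	/* slot in which to store the tuple */
							   InvalidBuffer,	/* buffer associated with this
												 * tuple */
							   true);	/* pfree tuple when done with it */
				return fslot;
			}
		}

Because the tuple now lives in whatever context is current when TupleQueueReaderNext() runs rather than in the per-tuple context, ownership passes to the slot: shouldFree = true tells ExecStoreTuple() to pfree the tuple when the slot is cleared or reloaded, replacing the old reset-the-whole-context cleanup.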
src/backend/executor/nodeGatherMerge.c

@@ -609,7 +609,7 @@ load_tuple_array(GatherMergeState *gm_state, int reader)
 								  &tuple_buffer->done);
 		if (!HeapTupleIsValid(tuple))
 			break;
-		tuple_buffer->tuple[i] = heap_copytuple(tuple);
+		tuple_buffer->tuple[i] = tuple;
 		tuple_buffer->nTuples++;
 	}
 }
@@ -673,7 +673,6 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
 								 &tuple_buffer->done);
 		if (!HeapTupleIsValid(tup))
 			return false;
-		tup = heap_copytuple(tup);
 
 		/*
 		 * Attempt to read more tuples in nowait mode and store them in the
@@ -703,20 +702,13 @@ gm_readnext_tuple(GatherMergeState *gm_state, int nreader, bool nowait,
 {
 	TupleQueueReader *reader;
 	HeapTuple	tup;
-	MemoryContext oldContext;
-	MemoryContext tupleContext;
 
 	/* Check for async events, particularly messages from workers. */
 	CHECK_FOR_INTERRUPTS();
 
 	/* Attempt to read a tuple. */
 	reader = gm_state->reader[nreader - 1];
-
-	/* Run TupleQueueReaders in per-tuple context */
-	tupleContext = gm_state->ps.ps_ExprContext->ecxt_per_tuple_memory;
-	oldContext = MemoryContextSwitchTo(tupleContext);
 	tup = TupleQueueReaderNext(reader, nowait, done);
-	MemoryContextSwitchTo(oldContext);
 
 	return tup;
 }
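On the Gather Merge side, the last hunk above reduces gm_readnext_tuple() to a thin wrapper; after the removals it reads approximately as follows (reconstructed from the context lines shown, whitespace aside):

static HeapTuple
gm_readnext_tuple(GatherMergeState *gm_state, int nreader, bool nowait,
				  bool *done)
{
	TupleQueueReader *reader;
	HeapTuple	tup;

	/* Check for async events, particularly messages from workers. */
	CHECK_FOR_INTERRUPTS();

	/* Attempt to read a tuple. */
	reader = gm_state->reader[nreader - 1];
	tup = TupleQueueReaderNext(reader, nowait, done);

	return tup;
}

Its callers, load_tuple_array() and gather_merge_readnext(), now keep the returned tuple directly instead of taking a heap_copytuple() copy, as the earlier hunks show.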
src/backend/executor/tqueue.c

@@ -161,6 +161,8 @@ DestroyTupleQueueReader(TupleQueueReader *reader)
  * is set to true when there are no remaining tuples and otherwise to false.
  *
  * The returned tuple, if any, is allocated in CurrentMemoryContext.
+ * Note that this routine must not leak memory!  (We used to allow that,
+ * but not any more.)
  *
  * Even when shm_mq_receive() returns SHM_MQ_WOULD_BLOCK, this can still
  * accumulate bytes from a partially-read message, so it's useful to call