mirror of
https://github.com/postgres/postgres.git
synced 2025-07-03 20:02:46 +03:00
Try again to fix accumulation of parallel worker instrumentation.
When a Gather or Gather Merge node is started and stopped multiple
times, accumulate instrumentation data only once, at the end, instead
of after each execution, to avoid recording inflated totals.
Commit 778e78ae9f, the previous attempt
at a fix, instead reset the state after every execution, which worked
for the general instrumentation data but had problems for the additional
instrumentation specific to Sort and Hash nodes.
Report by hubert depesz lubaczewski. Analysis and fix by Amit Kapila,
following a design proposal from Thomas Munro, with a comment tweak
by me.
Discussion: http://postgr.es/m/20171127175631.GA405@depesz.com
This commit is contained in:
@@ -899,12 +899,8 @@ ExecParallelReInitializeDSM(PlanState *planstate,
 							pcxt);
 			break;
 		case T_HashState:
-			/* even when not parallel-aware, for EXPLAIN ANALYZE */
-			ExecHashReInitializeDSM((HashState *) planstate, pcxt);
-			break;
 		case T_SortState:
-			/* even when not parallel-aware, for EXPLAIN ANALYZE */
-			ExecSortReInitializeDSM((SortState *) planstate, pcxt);
+			/* these nodes have DSM state, but no reinitialization is required */
 			break;

 		default:
@@ -977,7 +973,7 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,

 /*
  * Finish parallel execution.  We wait for parallel workers to finish, and
- * accumulate their buffer usage and instrumentation.
+ * accumulate their buffer usage.
  */
 void
 ExecParallelFinish(ParallelExecutorInfo *pei)
@@ -1023,23 +1019,23 @@ ExecParallelFinish(ParallelExecutorInfo *pei)
 	for (i = 0; i < nworkers; i++)
 		InstrAccumParallelQuery(&pei->buffer_usage[i]);

-	/* Finally, accumulate instrumentation, if any. */
-	if (pei->instrumentation)
-		ExecParallelRetrieveInstrumentation(pei->planstate,
-											pei->instrumentation);
-
 	pei->finished = true;
 }

 /*
- * Clean up whatever ParallelExecutorInfo resources still exist after
- * ExecParallelFinish.  We separate these routines because someone might
- * want to examine the contents of the DSM after ExecParallelFinish and
- * before calling this routine.
+ * Accumulate instrumentation, and then clean up whatever ParallelExecutorInfo
+ * resources still exist after ExecParallelFinish.  We separate these
+ * routines because someone might want to examine the contents of the DSM
+ * after ExecParallelFinish and before calling this routine.
  */
 void
 ExecParallelCleanup(ParallelExecutorInfo *pei)
 {
+	/* Accumulate instrumentation, if any. */
+	if (pei->instrumentation)
+		ExecParallelRetrieveInstrumentation(pei->planstate,
+											pei->instrumentation);
+
 	/* Free any serialized parameters. */
 	if (DsaPointerIsValid(pei->param_exec))
 	{
Reference in New Issue
Block a user