Add parallel_leader_participation GUC.
Sometimes, for testing, it's useful to have the leader do nothing but
read tuples from workers; and it's possible that could work out better
even in production.

Thomas Munro, reviewed by Amit Kapila and by me.  A few final tweaks
by me.

Discussion: http://postgr.es/m/CAEepm=2U++Lp3bNTv2Bv_kkr5NE2pOyHhxU=G0YTa4ZhSYhHiw@mail.gmail.com
--- a/src/backend/executor/nodeGather.c
+++ b/src/backend/executor/nodeGather.c
@@ -38,6 +38,7 @@
 #include "executor/nodeSubplan.h"
 #include "executor/tqueue.h"
 #include "miscadmin.h"
+#include "optimizer/planmain.h"
 #include "pgstat.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
@@ -73,7 +74,8 @@ ExecInitGather(Gather *node, EState *estate, int eflags)
 	gatherstate->ps.ExecProcNode = ExecGather;
 
 	gatherstate->initialized = false;
-	gatherstate->need_to_scan_locally = !node->single_copy;
+	gatherstate->need_to_scan_locally =
+		!node->single_copy && parallel_leader_participation;
 	gatherstate->tuples_needed = -1;
 
 	/*
@@ -193,9 +195,9 @@ ExecGather(PlanState *pstate)
 			node->nextreader = 0;
 		}
 
-		/* Run plan locally if no workers or not single-copy. */
+		/* Run plan locally if no workers or enabled and not single-copy. */
 		node->need_to_scan_locally = (node->nreaders == 0)
-			|| !gather->single_copy;
+			|| (!gather->single_copy && parallel_leader_participation);
 		node->initialized = true;
 	}
 
--- a/src/backend/executor/nodeGatherMerge.c
+++ b/src/backend/executor/nodeGatherMerge.c
@@ -23,6 +23,7 @@
 #include "executor/tqueue.h"
 #include "lib/binaryheap.h"
 #include "miscadmin.h"
+#include "optimizer/planmain.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
 
@@ -233,8 +234,9 @@ ExecGatherMerge(PlanState *pstate)
 			}
 		}
 
-		/* always allow leader to participate */
-		node->need_to_scan_locally = true;
+		/* allow leader to participate if enabled or no choice */
+		if (parallel_leader_participation || node->nreaders == 0)
+			node->need_to_scan_locally = true;
 		node->initialized = true;
 	}
 
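Note that the two executor changes apply slightly different rules: Gather folds the GUC into a single boolean test (a single-copy plan never runs locally alongside workers), while Gather Merge must still fall back to the leader when no workers were launched. A standalone C sketch of both decisions; the function names are illustrative, not PostgreSQL APIs:

#include <stdbool.h>
#include <stdio.h>

/* Gather: run the plan locally if there are no workers, or if leader
 * participation is enabled and the plan is not single-copy. */
static bool
gather_scan_locally(int nreaders, bool single_copy, bool participation)
{
	return (nreaders == 0) || (!single_copy && participation);
}

/* Gather Merge: run locally if participation is enabled, or if there
 * is no other choice because no workers were launched. */
static bool
gather_merge_scan_locally(int nreaders, bool participation)
{
	return participation || (nreaders == 0);
}

int
main(void)
{
	/* Two workers, participation off: the leader only gathers tuples. */
	printf("gather: %d\n", gather_scan_locally(2, false, false));		/* 0 */
	/* No workers: the leader must run the plan regardless of the GUC. */
	printf("gather: %d\n", gather_scan_locally(0, false, false));		/* 1 */
	printf("gather_merge: %d\n", gather_merge_scan_locally(0, false));	/* 1 */
	return 0;
}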
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -5137,7 +5137,6 @@ static double
 get_parallel_divisor(Path *path)
 {
 	double		parallel_divisor = path->parallel_workers;
-	double		leader_contribution;
 
 	/*
 	 * Early experience with parallel query suggests that when there is only
@@ -5150,9 +5149,14 @@ get_parallel_divisor(Path *path)
 	 * its time servicing each worker, and the remainder executing the
 	 * parallel plan.
 	 */
-	leader_contribution = 1.0 - (0.3 * path->parallel_workers);
-	if (leader_contribution > 0)
-		parallel_divisor += leader_contribution;
+	if (parallel_leader_participation)
+	{
+		double		leader_contribution;
+
+		leader_contribution = 1.0 - (0.3 * path->parallel_workers);
+		if (leader_contribution > 0)
+			parallel_divisor += leader_contribution;
+	}
 
 	return parallel_divisor;
 }
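On the costing side, the leader's contribution is now counted only when it will actually participate. A self-contained C sketch of the divisor arithmetic with a few worked values, using the patch's 0.3 per-worker overhead factor (the function name is illustrative, not the PostgreSQL symbol):

#include <stdbool.h>
#include <stdio.h>

static double
divisor(int workers, bool participation)
{
	double		d = workers;

	if (participation)
	{
		/* The leader contributes whatever fraction of its time is left
		 * after servicing workers at 0.3 each; at 4 or more workers the
		 * contribution is <= 0 and is ignored. */
		double		leader = 1.0 - (0.3 * workers);

		if (leader > 0)
			d += leader;
	}
	return d;
}

int
main(void)
{
	printf("%.1f\n", divisor(2, true));		/* 2.4: leader adds 1 - 0.6 */
	printf("%.1f\n", divisor(4, true));		/* 4.0: 1 - 1.2 <= 0, ignored */
	printf("%.1f\n", divisor(2, false));	/* 2.0: leader not counted */
	return 0;
}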
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -61,6 +61,7 @@
 /* GUC parameters */
 double		cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
 int			force_parallel_mode = FORCE_PARALLEL_OFF;
+bool		parallel_leader_participation = true;
 
 /* Hook for plugins to get control in planner() */
 planner_hook_type planner_hook = NULL;
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -1676,6 +1676,16 @@ static struct config_bool ConfigureNamesBool[] =
 		NULL, NULL, NULL
 	},
 
+	{
+		{"parallel_leader_participation", PGC_USERSET, RESOURCES_ASYNCHRONOUS,
+			gettext_noop("Controls whether Gather and Gather Merge also run subplans."),
+			gettext_noop("Should gather nodes also run subplans, or just gather tuples?")
+		},
+		&parallel_leader_participation,
+		true,
+		NULL, NULL, NULL
+	},
+
 	/* End-of-list marker */
 	{
 		{NULL, 0, 0, NULL, NULL}, NULL, false, NULL, NULL, NULL
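The new entry follows the usual config_bool shape: the generic fields, then the variable's address, the boot default, and the check/assign/show hooks. For comparison, DefineCustomBoolVariable() (a real API declared in utils/guc.h) is how an extension registers a similar boolean; a minimal sketch with a hypothetical extension name and variable:

#include "postgres.h"
#include "utils/guc.h"

PG_MODULE_MAGIC;

/* hypothetical GUC variable for this sketch */
static bool my_ext_leader_assist = true;

void		_PG_init(void);

void
_PG_init(void)
{
	DefineCustomBoolVariable("my_ext.leader_assist",
							 "Example boolean GUC.",
							 NULL,			/* no long description */
							 &my_ext_leader_assist,
							 true,			/* boot value */
							 PGC_USERSET,
							 0,				/* no flags */
							 NULL, NULL, NULL);	/* check/assign/show hooks */
}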
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -163,6 +163,7 @@
 #effective_io_concurrency = 1		# 1-1000; 0 disables prefetching
 #max_worker_processes = 8		# (change requires restart)
 #max_parallel_workers_per_gather = 2	# taken from max_parallel_workers
+#parallel_leader_participation = on
 #max_parallel_workers = 8		# maximum number of max_worker_processes that
 					#   can be used in parallel queries
 #old_snapshot_threshold = -1		# 1min-60d; -1 disables; 0 is immediate