mirror of
https://github.com/postgres/postgres.git
synced 2025-10-28 11:55:03 +03:00
Add parallel-aware hash joins.
Introduce parallel-aware hash joins that appear in EXPLAIN plans as Parallel
Hash Join with Parallel Hash. While hash joins could already appear in
parallel queries, they were previously always parallel-oblivious and had a
partial subplan only on the outer side, meaning that the work of the inner
subplan was duplicated in every worker.
After this commit, the planner will consider using a partial subplan on the
inner side too, using the Parallel Hash node to divide the work over the
available CPU cores and combine its results in shared memory. If the join
needs to be split into multiple batches in order to respect work_mem, then
workers process different batches as much as possible and then work together
on the remaining batches.
The advantages of a parallel-aware hash join over a parallel-oblivious hash
join used in a parallel query are that it:
* avoids wasting memory on duplicated hash tables
* avoids wasting disk space on duplicated batch files
* divides the work of building the hash table over the CPUs
One disadvantage is that there is some communication between the participating
CPUs which might outweigh the benefits of parallelism in the case of small
hash tables. This is avoided by the planner's existing reluctance to supply
partial plans for small scans, but it may be necessary to estimate
synchronization costs in future if that situation changes. Another is that
outer batch 0 must be written to disk if multiple batches are required.
A potential future advantage of parallel-aware hash joins is that right and
full outer joins could be supported, since there is a single set of matched
bits for each hashtable, but that is not yet implemented.
A new GUC enable_parallel_hash is defined to control the feature, defaulting
to on.
Author: Thomas Munro
Reviewed-By: Andres Freund, Robert Haas
Tested-By: Rafia Sabih, Prabhat Sahu
Discussion:
https://postgr.es/m/CAEepm=2W=cOkiZxcg6qiFQP-dHUe09aqTrEMM7yJDrHMhDv_RA@mail.gmail.com
https://postgr.es/m/CAEepm=37HKyJ4U6XOLi=JgfSHM3o6B-GaeO-6hkOmneTDkH+Uw@mail.gmail.com
This commit is contained in:
@@ -129,6 +129,7 @@ bool		enable_hashjoin = true;
 bool		enable_gathermerge = true;
 bool		enable_partition_wise_join = false;
 bool		enable_parallel_append = true;
+bool		enable_parallel_hash = true;

 typedef struct
 {
@@ -3130,16 +3131,19 @@ initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
 					  JoinType jointype,
 					  List *hashclauses,
 					  Path *outer_path, Path *inner_path,
-					  JoinPathExtraData *extra)
+					  JoinPathExtraData *extra,
+					  bool parallel_hash)
 {
 	Cost		startup_cost = 0;
 	Cost		run_cost = 0;
 	double		outer_path_rows = outer_path->rows;
 	double		inner_path_rows = inner_path->rows;
+	double		inner_path_rows_total = inner_path_rows;
 	int			num_hashclauses = list_length(hashclauses);
 	int			numbuckets;
 	int			numbatches;
 	int			num_skew_mcvs;
+	size_t		space_allowed;	/* unused */

 	/* cost of source data */
 	startup_cost += outer_path->startup_cost;
@@ -3160,6 +3164,15 @@ initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
 		* inner_path_rows;
 	run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;

+	/*
+	 * If this is a parallel hash build, then the value we have for
+	 * inner_rows_total currently refers only to the rows returned by each
+	 * participant.  For shared hash table size estimation, we need the total
+	 * number, so we need to undo the division.
+	 */
+	if (parallel_hash)
+		inner_path_rows_total *= get_parallel_divisor(inner_path);
+
 	/*
 	 * Get hash table size that executor would use for inner relation.
 	 *
@@ -3170,9 +3183,12 @@ initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
 	 * XXX at some point it might be interesting to try to account for skew
 	 * optimization in the cost estimate, but for now, we don't.
 	 */
-	ExecChooseHashTableSize(inner_path_rows,
+	ExecChooseHashTableSize(inner_path_rows_total,
 							inner_path->pathtarget->width,
 							true,	/* useskew */
+							parallel_hash,	/* try_combined_work_mem */
+							outer_path->parallel_workers,
+							&space_allowed,
 							&numbuckets,
 							&numbatches,
 							&num_skew_mcvs);
@@ -3204,6 +3220,7 @@ initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
 	workspace->run_cost = run_cost;
 	workspace->numbuckets = numbuckets;
 	workspace->numbatches = numbatches;
+	workspace->inner_rows_total = inner_path_rows_total;
 }
|
||||
/*
|
||||
@@ -3226,6 +3243,7 @@ final_cost_hashjoin(PlannerInfo *root, HashPath *path,
 	Path	   *inner_path = path->jpath.innerjoinpath;
 	double		outer_path_rows = outer_path->rows;
 	double		inner_path_rows = inner_path->rows;
+	double		inner_path_rows_total = workspace->inner_rows_total;
 	List	   *hashclauses = path->path_hashclauses;
 	Cost		startup_cost = workspace->startup_cost;
 	Cost		run_cost = workspace->run_cost;
||||
@@ -3266,6 +3284,9 @@ final_cost_hashjoin(PlannerInfo *root, HashPath *path,
 	/* mark the path with estimated # of batches */
 	path->num_batches = numbatches;

+	/* store the total number of tuples (sum of partial row estimates) */
+	path->inner_rows_total = inner_path_rows_total;
+
 	/* and compute the number of "virtual" buckets in the whole join */
 	virtualbuckets = (double) numbuckets * (double) numbatches;
Reference in New Issue
Block a user