
Post-PG 10 beta1 pgindent run

perltidy run not included.
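A pgindent run of this sort is normally driven from the top of the source tree
against a current typedefs list. The exact invocation is not recorded in the
commit, so the commands below are a sketch based on src/tools/pgindent/README;
the flag and the typedefs URL are assumptions:

    # hypothetical invocation: fetch the buildfarm's consolidated typedefs
    # list, then reindent the whole tree with it
    curl -o /tmp/typedefs.list https://buildfarm.postgresql.org/cgi-bin/typedefs.pl
    src/tools/pgindent/pgindent --typedefs=/tmp/typedefs.list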
Author: Bruce Momjian
Date:   2017-05-17 16:31:56 -04:00
parent  8a94332478
commit  a6fd7b7a5f

310 changed files with 3338 additions and 3171 deletions

src/backend/optimizer/path/allpaths.c

@@ -112,7 +112,7 @@ static void set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel,
 static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel,
              RangeTblEntry *rte);
 static void set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
-                    RangeTblEntry *rte);
+                         RangeTblEntry *rte);
 static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel,
                RangeTblEntry *rte);
 static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist);
@@ -648,6 +648,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
             return;
         case RTE_NAMEDTUPLESTORE:
+
             /*
              * tuplestore cannot be shared, at least without more
              * infrastructure to support that.
@@ -1579,7 +1580,7 @@ generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel,
                                                        total_subpaths,
                                                        pathkeys,
                                                        NULL,
-                                                       partitioned_rels));
+                                                  partitioned_rels));
     }
 }
@@ -2220,10 +2221,10 @@ generate_gather_paths(PlannerInfo *root, RelOptInfo *rel)
      * For each useful ordering, we can consider an order-preserving Gather
      * Merge.
      */
-    foreach (lc, rel->partial_pathlist)
+    foreach(lc, rel->partial_pathlist)
     {
-        Path *subpath = (Path *) lfirst(lc);
-        GatherMergePath *path;
+        Path       *subpath = (Path *) lfirst(lc);
+        GatherMergePath *path;
 
         if (subpath->pathkeys == NIL)
             continue;

src/backend/optimizer/path/costsize.c

@@ -664,8 +664,8 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
 {
     /*
      * For index only scans compute workers based on number of index pages
-     * fetched; the number of heap pages we fetch might be so small as
-     * to effectively rule out parallelism, which we don't want to do.
+     * fetched; the number of heap pages we fetch might be so small as to
+     * effectively rule out parallelism, which we don't want to do.
      */
     if (indexonly)
         rand_heap_pages = -1;
@@ -2188,7 +2188,7 @@ final_cost_nestloop(PlannerInfo *root, NestPath *path,
     /* For partial paths, scale row estimate. */
     if (path->path.parallel_workers > 0)
     {
-        double parallel_divisor = get_parallel_divisor(&path->path);
+        double      parallel_divisor = get_parallel_divisor(&path->path);
 
         path->path.rows =
             clamp_row_est(path->path.rows / parallel_divisor);
@@ -2624,7 +2624,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path,
     /* For partial paths, scale row estimate. */
     if (path->jpath.path.parallel_workers > 0)
     {
-        double parallel_divisor = get_parallel_divisor(&path->jpath.path);
+        double      parallel_divisor = get_parallel_divisor(&path->jpath.path);
 
         path->jpath.path.rows =
             clamp_row_est(path->jpath.path.rows / parallel_divisor);
@@ -3029,7 +3029,7 @@ final_cost_hashjoin(PlannerInfo *root, HashPath *path,
     /* For partial paths, scale row estimate. */
     if (path->jpath.path.parallel_workers > 0)
     {
-        double parallel_divisor = get_parallel_divisor(&path->jpath.path);
+        double      parallel_divisor = get_parallel_divisor(&path->jpath.path);
 
         path->jpath.path.rows =
             clamp_row_est(path->jpath.path.rows / parallel_divisor);

src/backend/optimizer/path/indxpath.c

@@ -1073,8 +1073,8 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel,
                                    true);
 
         /*
-         * if, after costing the path, we find that it's not worth
-         * using parallel workers, just free it.
+         * if, after costing the path, we find that it's not worth using
+         * parallel workers, just free it.
          */
         if (ipath->path.parallel_workers > 0)
             add_partial_path(rel, (Path *) ipath);