Implement Incremental Sort
Incremental Sort is an optimized variant of multikey sort for cases when the input is already sorted by a prefix of the requested sort keys. For example, when a relation is already sorted by (key1, key2) and we need to sort it by (key1, key2, key3), we can simply split the input rows into groups having equal values in (key1, key2), and only sort/compare the remaining column key3.

This has a number of benefits:

- Reduced memory consumption, because only a single group (determined by values in the sorted prefix) needs to be kept in memory. This may also eliminate the need to spill to disk.

- Lower startup cost, because Incremental Sort produces results after each prefix group, which is beneficial for plans where startup cost matters (for example, queries with a LIMIT clause).

We consider both Sort and Incremental Sort, and decide based on costing.

The implemented algorithm operates in two different modes:

- Fetching a minimum number of tuples without checking equality on the prefix keys, and sorting on all columns when safe.

- Fetching all tuples for a single prefix group and then sorting by comparing only the remaining (non-prefix) keys.

We always start in the first mode, and employ a heuristic to switch into the second mode if we believe it's beneficial - the goal is to minimize the number of unnecessary comparisons while keeping memory consumption below work_mem.

This is a very old patch series. The idea was originally proposed by Alexander Korotkov back in 2013, and then revived in 2017. In 2018 the patch was taken over by James Coleman, who wrote and rewrote most of the current code. There were many reviewers/contributors since 2013 - I've done my best to pick the most active ones, and listed them in this commit message.

Author: James Coleman, Alexander Korotkov
Reviewed-by: Tomas Vondra, Andreas Karlsson, Marti Raudsepp, Peter Geoghegan, Robert Haas, Thomas Munro, Antonin Houska, Andres Freund, Alexander Kuzmenkov
Discussion: https://postgr.es/m/CAPpHfdscOX5an71nHd8WSUH6GNOCf=V7wgDaTXdDd9=goN-gfA@mail.gmail.com
Discussion: https://postgr.es/m/CAPpHfds1waRZ=NOmueYq0sx1ZSCnt+5QJvizT8ndT2=etZEeAQ@mail.gmail.com
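To make the splitting idea concrete, here is a minimal, self-contained C sketch (illustrative only, not the executor code from this commit): rows already ordered by (key1, key2) are sorted on (key1, key2, key3) by sorting each prefix group independently. The real executor additionally keeps only one group in memory at a time, which is where the memory and startup-cost benefits come from.

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Row { int key1, key2, key3; } Row;

    static int cmp_key3(const void *a, const void *b)
    {
        return ((const Row *) a)->key3 - ((const Row *) b)->key3;
    }

    /*
     * Input is already sorted by (key1, key2).  Find each run with an equal
     * prefix and sort only that run by key3; the concatenation of the runs
     * is then sorted by (key1, key2, key3).
     */
    static void incremental_sort(Row *rows, int n)
    {
        int start = 0;

        for (int i = 1; i <= n; i++)
        {
            if (i == n || rows[i].key1 != rows[start].key1 ||
                rows[i].key2 != rows[start].key2)
            {
                qsort(rows + start, i - start, sizeof(Row), cmp_key3);
                start = i;
            }
        }
    }

    int main(void)
    {
        Row rows[] = {{1, 1, 9}, {1, 1, 4}, {1, 2, 7}, {2, 1, 3}, {2, 1, 1}};
        int n = sizeof(rows) / sizeof(rows[0]);

        incremental_sort(rows, n);
        for (int i = 0; i < n; i++)
            printf("(%d, %d, %d)\n", rows[i].key1, rows[i].key2, rows[i].key3);
        return 0;
    }

Each qsort call sees only one group, so the work per group is small and the first group's output can be emitted before later groups are even read.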
src/backend/commands/explain.c

@@ -82,6 +82,8 @@ static void show_upper_qual(List *qual, const char *qlabel,
                            ExplainState *es);
static void show_sort_keys(SortState *sortstate, List *ancestors,
                           ExplainState *es);
static void show_incremental_sort_keys(IncrementalSortState *incrsortstate,
                                       List *ancestors, ExplainState *es);
static void show_merge_append_keys(MergeAppendState *mstate, List *ancestors,
                                   ExplainState *es);
static void show_agg_keys(AggState *astate, List *ancestors,

@@ -95,7 +97,7 @@ static void show_grouping_set_keys(PlanState *planstate,
static void show_group_keys(GroupState *gstate, List *ancestors,
                            ExplainState *es);
static void show_sort_group_keys(PlanState *planstate, const char *qlabel,
                                 int nkeys, AttrNumber *keycols,
                                 int nkeys, int nPresortedKeys, AttrNumber *keycols,
                                 Oid *sortOperators, Oid *collations, bool *nullsFirst,
                                 List *ancestors, ExplainState *es);
static void show_sortorder_options(StringInfo buf, Node *sortexpr,

@@ -103,6 +105,8 @@ static void show_sortorder_options(StringInfo buf, Node *sortexpr,
static void show_tablesample(TableSampleClause *tsc, PlanState *planstate,
                             List *ancestors, ExplainState *es);
static void show_sort_info(SortState *sortstate, ExplainState *es);
static void show_incremental_sort_info(IncrementalSortState *incrsortstate,
                                       ExplainState *es);
static void show_hash_info(HashState *hashstate, ExplainState *es);
static void show_hashagg_info(AggState *hashstate, ExplainState *es);
static void show_tidbitmap_info(BitmapHeapScanState *planstate,

@@ -1278,6 +1282,9 @@ ExplainNode(PlanState *planstate, List *ancestors,
        case T_Sort:
            pname = sname = "Sort";
            break;
        case T_IncrementalSort:
            pname = sname = "Incremental Sort";
            break;
        case T_Group:
            pname = sname = "Group";
            break;

@@ -1937,6 +1944,12 @@ ExplainNode(PlanState *planstate, List *ancestors,
            show_sort_keys(castNode(SortState, planstate), ancestors, es);
            show_sort_info(castNode(SortState, planstate), es);
            break;
        case T_IncrementalSort:
            show_incremental_sort_keys(castNode(IncrementalSortState, planstate),
                                       ancestors, es);
            show_incremental_sort_info(castNode(IncrementalSortState, planstate),
                                       es);
            break;
        case T_MergeAppend:
            show_merge_append_keys(castNode(MergeAppendState, planstate),
                                   ancestors, es);

@@ -2270,12 +2283,29 @@ show_sort_keys(SortState *sortstate, List *ancestors, ExplainState *es)
    Sort *plan = (Sort *) sortstate->ss.ps.plan;

    show_sort_group_keys((PlanState *) sortstate, "Sort Key",
                         plan->numCols, plan->sortColIdx,
                         plan->numCols, 0, plan->sortColIdx,
                         plan->sortOperators, plan->collations,
                         plan->nullsFirst,
                         ancestors, es);
}

/*
 * Show the sort keys for an IncrementalSort node.
 */
static void
show_incremental_sort_keys(IncrementalSortState *incrsortstate,
                           List *ancestors, ExplainState *es)
{
    IncrementalSort *plan = (IncrementalSort *) incrsortstate->ss.ps.plan;

    show_sort_group_keys((PlanState *) incrsortstate, "Sort Key",
                         plan->sort.numCols, plan->nPresortedCols,
                         plan->sort.sortColIdx,
                         plan->sort.sortOperators, plan->sort.collations,
                         plan->sort.nullsFirst,
                         ancestors, es);
}

/*
 * Likewise, for a MergeAppend node.
 */

@@ -2286,7 +2316,7 @@ show_merge_append_keys(MergeAppendState *mstate, List *ancestors,
    MergeAppend *plan = (MergeAppend *) mstate->ps.plan;

    show_sort_group_keys((PlanState *) mstate, "Sort Key",
                         plan->numCols, plan->sortColIdx,
                         plan->numCols, 0, plan->sortColIdx,
                         plan->sortOperators, plan->collations,
                         plan->nullsFirst,
                         ancestors, es);

@@ -2310,7 +2340,7 @@ show_agg_keys(AggState *astate, List *ancestors,
        show_grouping_sets(outerPlanState(astate), plan, ancestors, es);
    else
        show_sort_group_keys(outerPlanState(astate), "Group Key",
                             plan->numCols, plan->grpColIdx,
                             plan->numCols, 0, plan->grpColIdx,
                             NULL, NULL, NULL,
                             ancestors, es);

@@ -2379,7 +2409,7 @@ show_grouping_set_keys(PlanState *planstate,
    if (sortnode)
    {
        show_sort_group_keys(planstate, "Sort Key",
                             sortnode->numCols, sortnode->sortColIdx,
                             sortnode->numCols, 0, sortnode->sortColIdx,
                             sortnode->sortOperators, sortnode->collations,
                             sortnode->nullsFirst,
                             ancestors, es);

@@ -2436,7 +2466,7 @@ show_group_keys(GroupState *gstate, List *ancestors,
    /* The key columns refer to the tlist of the child plan */
    ancestors = lcons(plan, ancestors);
    show_sort_group_keys(outerPlanState(gstate), "Group Key",
                         plan->numCols, plan->grpColIdx,
                         plan->numCols, 0, plan->grpColIdx,
                         NULL, NULL, NULL,
                         ancestors, es);
    ancestors = list_delete_first(ancestors);

@@ -2449,13 +2479,14 @@ show_group_keys(GroupState *gstate, List *ancestors,
 */
static void
show_sort_group_keys(PlanState *planstate, const char *qlabel,
                     int nkeys, AttrNumber *keycols,
                     int nkeys, int nPresortedKeys, AttrNumber *keycols,
                     Oid *sortOperators, Oid *collations, bool *nullsFirst,
                     List *ancestors, ExplainState *es)
{
    Plan *plan = planstate->plan;
    List *context;
    List *result = NIL;
    List *resultPresorted = NIL;
    StringInfoData sortkeybuf;
    bool useprefix;
    int keyno;

@@ -2495,9 +2526,13 @@ show_sort_group_keys(PlanState *planstate, const char *qlabel,
                                 nullsFirst[keyno]);
        /* Emit one property-list item per sort key */
        result = lappend(result, pstrdup(sortkeybuf.data));
        if (keyno < nPresortedKeys)
            resultPresorted = lappend(resultPresorted, exprstr);
    }

    ExplainPropertyList(qlabel, result, es);
    if (nPresortedKeys > 0)
        ExplainPropertyList("Presorted Key", resultPresorted, es);
}

/*

@@ -2711,6 +2746,196 @@ show_sort_info(SortState *sortstate, ExplainState *es)
    }
}

/*
 * Incremental sort nodes sort in (a potentially very large number of) batches,
 * so EXPLAIN ANALYZE needs to roll up the tuplesort stats from each batch into
 * an intelligible summary.
 *
 * This function is used for both a non-parallel node and each worker in a
 * parallel incremental sort node.
 */
static void
show_incremental_sort_group_info(IncrementalSortGroupInfo *groupInfo,
                                 const char *groupLabel, bool indent, ExplainState *es)
{
    ListCell *methodCell;
    List *methodNames = NIL;

    /* Generate a list of sort methods used across all groups. */
    for (int bit = 0; bit < sizeof(bits32); ++bit)
    {
        if (groupInfo->sortMethods & (1 << bit))
        {
            TuplesortMethod sortMethod = (1 << bit);
            const char *methodName;

            methodName = tuplesort_method_name(sortMethod);
            methodNames = lappend(methodNames, unconstify(char *, methodName));
        }
    }

    if (es->format == EXPLAIN_FORMAT_TEXT)
    {
        if (indent)
            appendStringInfoSpaces(es->str, es->indent * 2);
        appendStringInfo(es->str, "%s Groups: %ld Sort Method", groupLabel,
                         groupInfo->groupCount);
        /* plural/singular based on methodNames size */
        if (list_length(methodNames) > 1)
            appendStringInfo(es->str, "s: ");
        else
            appendStringInfo(es->str, ": ");
        foreach(methodCell, methodNames)
        {
            appendStringInfo(es->str, "%s", (char *) methodCell->ptr_value);
            if (foreach_current_index(methodCell) < list_length(methodNames) - 1)
                appendStringInfo(es->str, ", ");
        }

        if (groupInfo->maxMemorySpaceUsed > 0)
        {
            long avgSpace = groupInfo->totalMemorySpaceUsed / groupInfo->groupCount;
            const char *spaceTypeName;

            spaceTypeName = tuplesort_space_type_name(SORT_SPACE_TYPE_MEMORY);
            appendStringInfo(es->str, " %s: avg=%ldkB peak=%ldkB",
                             spaceTypeName, avgSpace,
                             groupInfo->maxMemorySpaceUsed);
        }

        if (groupInfo->maxDiskSpaceUsed > 0)
        {
            long avgSpace = groupInfo->totalDiskSpaceUsed / groupInfo->groupCount;
            const char *spaceTypeName;

            spaceTypeName = tuplesort_space_type_name(SORT_SPACE_TYPE_DISK);
            /* Add a semicolon separator only if memory stats were printed. */
            if (groupInfo->maxMemorySpaceUsed > 0)
                appendStringInfo(es->str, ";");
            appendStringInfo(es->str, " %s: avg=%ldkB peak=%ldkB",
                             spaceTypeName, avgSpace,
                             groupInfo->maxDiskSpaceUsed);
        }
    }
    else
    {
        StringInfoData groupName;

        initStringInfo(&groupName);
        appendStringInfo(&groupName, "%s Groups", groupLabel);
        ExplainOpenGroup("Incremental Sort Groups", groupName.data, true, es);
        ExplainPropertyInteger("Group Count", NULL, groupInfo->groupCount, es);

        ExplainPropertyList("Sort Methods Used", methodNames, es);

        if (groupInfo->maxMemorySpaceUsed > 0)
        {
            long avgSpace = groupInfo->totalMemorySpaceUsed / groupInfo->groupCount;
            const char *spaceTypeName;
            StringInfoData memoryName;

            spaceTypeName = tuplesort_space_type_name(SORT_SPACE_TYPE_MEMORY);
            initStringInfo(&memoryName);
            appendStringInfo(&memoryName, "Sort Space %s", spaceTypeName);
            ExplainOpenGroup("Sort Space", memoryName.data, true, es);

            ExplainPropertyInteger("Average Sort Space Used", "kB", avgSpace, es);
            ExplainPropertyInteger("Maximum Sort Space Used", "kB",
                                   groupInfo->maxMemorySpaceUsed, es);

            ExplainCloseGroup("Sort Spaces", memoryName.data, true, es);
        }
        if (groupInfo->maxDiskSpaceUsed > 0)
        {
            long avgSpace = groupInfo->totalDiskSpaceUsed / groupInfo->groupCount;
            const char *spaceTypeName;
            StringInfoData diskName;

            spaceTypeName = tuplesort_space_type_name(SORT_SPACE_TYPE_DISK);
            initStringInfo(&diskName);
            appendStringInfo(&diskName, "Sort Space %s", spaceTypeName);
            ExplainOpenGroup("Sort Space", diskName.data, true, es);

            ExplainPropertyInteger("Average Sort Space Used", "kB", avgSpace, es);
            ExplainPropertyInteger("Maximum Sort Space Used", "kB",
                                   groupInfo->maxDiskSpaceUsed, es);

            ExplainCloseGroup("Sort Spaces", diskName.data, true, es);
        }

        ExplainCloseGroup("Incremental Sort Groups", groupName.data, true, es);
    }
}

/*
 * If it's EXPLAIN ANALYZE, show tuplesort stats for an incremental sort node
 */
static void
show_incremental_sort_info(IncrementalSortState *incrsortstate,
                           ExplainState *es)
{
    IncrementalSortGroupInfo *fullsortGroupInfo;
    IncrementalSortGroupInfo *prefixsortGroupInfo;

    fullsortGroupInfo = &incrsortstate->incsort_info.fullsortGroupInfo;

    if (!(es->analyze && fullsortGroupInfo->groupCount > 0))
        return;

    show_incremental_sort_group_info(fullsortGroupInfo, "Full-sort", true, es);
    prefixsortGroupInfo = &incrsortstate->incsort_info.prefixsortGroupInfo;
    if (prefixsortGroupInfo->groupCount > 0)
    {
        if (es->format == EXPLAIN_FORMAT_TEXT)
            appendStringInfo(es->str, " ");
        show_incremental_sort_group_info(prefixsortGroupInfo, "Presorted", false, es);
    }
    if (es->format == EXPLAIN_FORMAT_TEXT)
        appendStringInfo(es->str, "\n");

    if (incrsortstate->shared_info != NULL)
    {
        int n;
        bool indent_first_line;

        for (n = 0; n < incrsortstate->shared_info->num_workers; n++)
        {
            IncrementalSortInfo *incsort_info =
                &incrsortstate->shared_info->sinfo[n];

            /*
             * If a worker hasn't processed any sort groups at all, then exclude
             * it from output since it either didn't launch or didn't
             * contribute anything meaningful.
             */
            fullsortGroupInfo = &incsort_info->fullsortGroupInfo;
            prefixsortGroupInfo = &incsort_info->prefixsortGroupInfo;
            if (fullsortGroupInfo->groupCount == 0 &&
                prefixsortGroupInfo->groupCount == 0)
                continue;

            if (es->workers_state)
                ExplainOpenWorker(n, es);

            indent_first_line = es->workers_state == NULL || es->verbose;
            show_incremental_sort_group_info(fullsortGroupInfo, "Full-sort",
                                             indent_first_line, es);
            if (prefixsortGroupInfo->groupCount > 0)
            {
                if (es->format == EXPLAIN_FORMAT_TEXT)
                    appendStringInfo(es->str, " ");
                show_incremental_sort_group_info(prefixsortGroupInfo, "Presorted", false, es);
            }
            if (es->format == EXPLAIN_FORMAT_TEXT)
                appendStringInfo(es->str, "\n");

            if (es->workers_state)
                ExplainCloseWorker(n, es);
        }
    }
}

/*
 * Show information on hash buckets/batches.
 */
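Pieced together from the format strings above, the text-format EXPLAIN ANALYZE output for an incremental sort node looks roughly like the following (the plan shape and group/memory numbers are invented for illustration; the labels come directly from the code):

    Incremental Sort
      Sort Key: a, b
      Presorted Key: a
      Full-sort Groups: 5 Sort Method: quicksort Memory: avg=28kB peak=28kB
      Presorted Groups: 2 Sort Methods: top-N heapsort, quicksort Memory: avg=31kB peak=32kB

The "Full-sort" line summarizes batches sorted in the first mode (all columns), while the "Presorted" line summarizes batches sorted in the per-group mode; the non-text formats emit the same data as "Incremental Sort Groups" properties.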
src/backend/executor/Makefile

@@ -46,6 +46,7 @@ OBJS = \
	nodeGroup.o \
	nodeHash.o \
	nodeHashjoin.o \
	nodeIncrementalSort.o \
	nodeIndexonlyscan.o \
	nodeIndexscan.o \
	nodeLimit.o \
src/backend/executor/execAmi.c

@@ -30,6 +30,7 @@
#include "executor/nodeGroup.h"
#include "executor/nodeHash.h"
#include "executor/nodeHashjoin.h"
#include "executor/nodeIncrementalSort.h"
#include "executor/nodeIndexonlyscan.h"
#include "executor/nodeIndexscan.h"
#include "executor/nodeLimit.h"

@@ -252,6 +253,10 @@ ExecReScan(PlanState *node)
            ExecReScanSort((SortState *) node);
            break;

        case T_IncrementalSortState:
            ExecReScanIncrementalSort((IncrementalSortState *) node);
            break;

        case T_GroupState:
            ExecReScanGroup((GroupState *) node);
            break;

@@ -557,8 +562,17 @@ ExecSupportsBackwardScan(Plan *node)
        case T_CteScan:
        case T_Material:
        case T_Sort:
            /* these don't evaluate tlist */
            return true;

        case T_IncrementalSort:

            /*
             * Unlike full sort, incremental sort keeps only a single group of
             * tuples in memory, so it can't scan backwards.
             */
            return false;

        case T_LockRows:
        case T_Limit:
            return ExecSupportsBackwardScan(outerPlan(node));
src/backend/executor/execParallel.c

@@ -31,6 +31,7 @@
#include "executor/nodeForeignscan.h"
#include "executor/nodeHash.h"
#include "executor/nodeHashjoin.h"
#include "executor/nodeIncrementalSort.h"
#include "executor/nodeIndexonlyscan.h"
#include "executor/nodeIndexscan.h"
#include "executor/nodeSeqscan.h"

@@ -283,6 +284,10 @@ ExecParallelEstimate(PlanState *planstate, ExecParallelEstimateContext *e)
            /* even when not parallel-aware, for EXPLAIN ANALYZE */
            ExecSortEstimate((SortState *) planstate, e->pcxt);
            break;
        case T_IncrementalSortState:
            /* even when not parallel-aware, for EXPLAIN ANALYZE */
            ExecIncrementalSortEstimate((IncrementalSortState *) planstate, e->pcxt);
            break;

        default:
            break;

@@ -496,6 +501,10 @@ ExecParallelInitializeDSM(PlanState *planstate,
            /* even when not parallel-aware, for EXPLAIN ANALYZE */
            ExecSortInitializeDSM((SortState *) planstate, d->pcxt);
            break;
        case T_IncrementalSortState:
            /* even when not parallel-aware, for EXPLAIN ANALYZE */
            ExecIncrementalSortInitializeDSM((IncrementalSortState *) planstate, d->pcxt);
            break;

        default:
            break;

@@ -972,6 +981,7 @@ ExecParallelReInitializeDSM(PlanState *planstate,
            break;
        case T_HashState:
        case T_SortState:
        case T_IncrementalSortState:
            /* these nodes have DSM state, but no reinitialization is required */
            break;

@@ -1032,6 +1042,9 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,
        case T_SortState:
            ExecSortRetrieveInstrumentation((SortState *) planstate);
            break;
        case T_IncrementalSortState:
            ExecIncrementalSortRetrieveInstrumentation((IncrementalSortState *) planstate);
            break;
        case T_HashState:
            ExecHashRetrieveInstrumentation((HashState *) planstate);
            break;

@@ -1318,6 +1331,11 @@ ExecParallelInitializeWorker(PlanState *planstate, ParallelWorkerContext *pwcxt)
            /* even when not parallel-aware, for EXPLAIN ANALYZE */
            ExecSortInitializeWorker((SortState *) planstate, pwcxt);
            break;
        case T_IncrementalSortState:
            /* even when not parallel-aware, for EXPLAIN ANALYZE */
            ExecIncrementalSortInitializeWorker((IncrementalSortState *) planstate,
                                                pwcxt);
            break;

        default:
            break;
src/backend/executor/execProcnode.c

@@ -88,6 +88,7 @@
#include "executor/nodeGroup.h"
#include "executor/nodeHash.h"
#include "executor/nodeHashjoin.h"
#include "executor/nodeIncrementalSort.h"
#include "executor/nodeIndexonlyscan.h"
#include "executor/nodeIndexscan.h"
#include "executor/nodeLimit.h"

@@ -313,6 +314,11 @@ ExecInitNode(Plan *node, EState *estate, int eflags)
                                                 estate, eflags);
            break;

        case T_IncrementalSort:
            result = (PlanState *) ExecInitIncrementalSort((IncrementalSort *) node,
                                                           estate, eflags);
            break;

        case T_Group:
            result = (PlanState *) ExecInitGroup((Group *) node,
                                                 estate, eflags);

@@ -693,6 +699,10 @@ ExecEndNode(PlanState *node)
            ExecEndSort((SortState *) node);
            break;

        case T_IncrementalSortState:
            ExecEndIncrementalSort((IncrementalSortState *) node);
            break;

        case T_GroupState:
            ExecEndGroup((GroupState *) node);
            break;

@@ -839,6 +849,30 @@ ExecSetTupleBound(int64 tuples_needed, PlanState *child_node)
            sortState->bound = tuples_needed;
        }
    }
    else if (IsA(child_node, IncrementalSortState))
    {
        /*
         * If it is an IncrementalSort node, notify it that it can use bounded
         * sort.
         *
         * Note: it is the responsibility of nodeIncrementalSort.c to react
         * properly to changes of these parameters. If we ever redesign this,
         * it'd be a good idea to integrate this signaling with the
         * parameter-change mechanism.
         */
        IncrementalSortState *sortState = (IncrementalSortState *) child_node;

        if (tuples_needed < 0)
        {
            /* make sure flag gets reset if needed upon rescan */
            sortState->bounded = false;
        }
        else
        {
            sortState->bounded = true;
            sortState->bound = tuples_needed;
        }
    }
    else if (IsA(child_node, AppendState))
    {
        /*
src/backend/executor/nodeIncrementalSort.c (new file, 1263 lines; diff suppressed because it is too large)
src/backend/executor/nodeSort.c

@@ -93,7 +93,8 @@ ExecSort(PlanState *pstate)
                                              plannode->collations,
                                              plannode->nullsFirst,
                                              work_mem,
                                              NULL, node->randomAccess);
                                              NULL,
                                              node->randomAccess);
        if (node->bounded)
            tuplesort_set_bound(tuplesortstate, node->bound);
        node->tuplesortstate = (void *) tuplesortstate;
src/backend/nodes/copyfuncs.c

@@ -927,6 +927,24 @@ _copyMaterial(const Material *from)
}


/*
 * CopySortFields
 *
 * This function copies the fields of the Sort node. It is used by
 * all the copy functions for classes which inherit from Sort.
 */
static void
CopySortFields(const Sort *from, Sort *newnode)
{
    CopyPlanFields((const Plan *) from, (Plan *) newnode);

    COPY_SCALAR_FIELD(numCols);
    COPY_POINTER_FIELD(sortColIdx, from->numCols * sizeof(AttrNumber));
    COPY_POINTER_FIELD(sortOperators, from->numCols * sizeof(Oid));
    COPY_POINTER_FIELD(collations, from->numCols * sizeof(Oid));
    COPY_POINTER_FIELD(nullsFirst, from->numCols * sizeof(bool));
}

/*
 * _copySort
 */

@@ -938,13 +956,29 @@ _copySort(const Sort *from)
    /*
     * copy node superclass fields
     */
    CopyPlanFields((const Plan *) from, (Plan *) newnode);
    CopySortFields(from, newnode);

    COPY_SCALAR_FIELD(numCols);
    COPY_POINTER_FIELD(sortColIdx, from->numCols * sizeof(AttrNumber));
    COPY_POINTER_FIELD(sortOperators, from->numCols * sizeof(Oid));
    COPY_POINTER_FIELD(collations, from->numCols * sizeof(Oid));
    COPY_POINTER_FIELD(nullsFirst, from->numCols * sizeof(bool));
    return newnode;
}


/*
 * _copyIncrementalSort
 */
static IncrementalSort *
_copyIncrementalSort(const IncrementalSort *from)
{
    IncrementalSort *newnode = makeNode(IncrementalSort);

    /*
     * copy node superclass fields
     */
    CopySortFields((const Sort *) from, (Sort *) newnode);

    /*
     * copy remainder of node
     */
    COPY_SCALAR_FIELD(nPresortedCols);

    return newnode;
}

@@ -4898,6 +4932,9 @@ copyObjectImpl(const void *from)
        case T_Sort:
            retval = _copySort(from);
            break;
        case T_IncrementalSort:
            retval = _copyIncrementalSort(from);
            break;
        case T_Group:
            retval = _copyGroup(from);
            break;
src/backend/nodes/outfuncs.c

@@ -837,10 +837,8 @@ _outMaterial(StringInfo str, const Material *node)
}

static void
_outSort(StringInfo str, const Sort *node)
_outSortInfo(StringInfo str, const Sort *node)
{
    WRITE_NODE_TYPE("SORT");

    _outPlanInfo(str, (const Plan *) node);

    WRITE_INT_FIELD(numCols);

@@ -850,6 +848,24 @@ _outSort(StringInfo str, const Sort *node)
    WRITE_BOOL_ARRAY(nullsFirst, node->numCols);
}

static void
_outSort(StringInfo str, const Sort *node)
{
    WRITE_NODE_TYPE("SORT");

    _outSortInfo(str, node);
}

static void
_outIncrementalSort(StringInfo str, const IncrementalSort *node)
{
    WRITE_NODE_TYPE("INCREMENTALSORT");

    _outSortInfo(str, (const Sort *) node);

    WRITE_INT_FIELD(nPresortedCols);
}

static void
_outUnique(StringInfo str, const Unique *node)
{

@@ -3786,6 +3802,9 @@ outNode(StringInfo str, const void *obj)
        case T_Sort:
            _outSort(str, obj);
            break;
        case T_IncrementalSort:
            _outIncrementalSort(str, obj);
            break;
        case T_Unique:
            _outUnique(str, obj);
            break;
src/backend/nodes/readfuncs.c

@@ -2150,12 +2150,13 @@ _readMaterial(void)
}

/*
 * _readSort
 * ReadCommonSort
 *	Assign the basic stuff of all nodes that inherit from Sort
 */
static Sort *
_readSort(void)
static void
ReadCommonSort(Sort *local_node)
{
    READ_LOCALS(Sort);
    READ_TEMP_LOCALS();

    ReadCommonPlan(&local_node->plan);

@@ -2164,6 +2165,32 @@ ReadCommonSort(Sort *local_node)
    READ_OID_ARRAY(sortOperators, local_node->numCols);
    READ_OID_ARRAY(collations, local_node->numCols);
    READ_BOOL_ARRAY(nullsFirst, local_node->numCols);
}

/*
 * _readSort
 */
static Sort *
_readSort(void)
{
    READ_LOCALS_NO_FIELDS(Sort);

    ReadCommonSort(local_node);

    READ_DONE();
}

/*
 * _readIncrementalSort
 */
static IncrementalSort *
_readIncrementalSort(void)
{
    READ_LOCALS(IncrementalSort);

    ReadCommonSort(&local_node->sort);

    READ_INT_FIELD(nPresortedCols);

    READ_DONE();
}

@@ -2801,6 +2828,8 @@ parseNodeString(void)
        return_value = _readMaterial();
    else if (MATCH("SORT", 4))
        return_value = _readSort();
    else if (MATCH("INCREMENTALSORT", 15))
        return_value = _readIncrementalSort();
    else if (MATCH("GROUP", 5))
        return_value = _readGroup();
    else if (MATCH("AGG", 3))
src/backend/optimizer/path/allpaths.c

@@ -3881,6 +3881,10 @@ print_path(PlannerInfo *root, Path *path, int indent)
            ptype = "Sort";
            subpath = ((SortPath *) path)->subpath;
            break;
        case T_IncrementalSortPath:
            ptype = "IncrementalSort";
            subpath = ((SortPath *) path)->subpath;
            break;
        case T_GroupPath:
            ptype = "Group";
            subpath = ((GroupPath *) path)->subpath;
src/backend/optimizer/path/costsize.c

@@ -128,6 +128,7 @@
bool enable_indexonlyscan = true;
bool enable_bitmapscan = true;
bool enable_tidscan = true;
bool enable_sort = true;
bool enable_incrementalsort = true;
bool enable_hashagg = true;
bool enable_hashagg_disk = true;
bool enable_groupingsets_hash_disk = false;

@@ -1648,9 +1649,9 @@ cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
}

/*
 * cost_sort
 *	Determines and returns the cost of sorting a relation, including
 *	the cost of reading the input data.
 * cost_tuplesort
 *	Determines and returns the cost of sorting a relation using tuplesort,
 *	not including the cost of reading the input data.
 *
 * If the total volume of data to sort is less than sort_mem, we will do
 * an in-memory sort, which requires no I/O and about t*log2(t) tuple

@@ -1677,39 +1678,23 @@ cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
 * specifying nonzero comparison_cost; typically that's used for any extra
 * work that has to be done to prepare the inputs to the comparison operators.
 *
 * 'pathkeys' is a list of sort keys
 * 'input_cost' is the total cost for reading the input data
 * 'tuples' is the number of tuples in the relation
 * 'width' is the average tuple width in bytes
 * 'comparison_cost' is the extra cost per comparison, if any
 * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
 * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
 *
 * NOTE: some callers currently pass NIL for pathkeys because they
 * can't conveniently supply the sort keys. Since this routine doesn't
 * currently do anything with pathkeys anyway, that doesn't matter...
 * but if it ever does, it should react gracefully to lack of key data.
 * (Actually, the thing we'd most likely be interested in is just the number
 * of sort keys, which all callers *could* supply.)
 */
void
cost_sort(Path *path, PlannerInfo *root,
          List *pathkeys, Cost input_cost, double tuples, int width,
          Cost comparison_cost, int sort_mem,
          double limit_tuples)
static void
cost_tuplesort(Cost *startup_cost, Cost *run_cost,
               double tuples, int width,
               Cost comparison_cost, int sort_mem,
               double limit_tuples)
{
    Cost startup_cost = input_cost;
    Cost run_cost = 0;
    double input_bytes = relation_byte_size(tuples, width);
    double output_bytes;
    double output_tuples;
    long sort_mem_bytes = sort_mem * 1024L;

    if (!enable_sort)
        startup_cost += disable_cost;

    path->rows = tuples;

    /*
     * We want to be sure the cost of a sort is never estimated as zero, even
     * if passed-in tuple count is zero. Besides, mustn't do log(0)...

@@ -1748,7 +1733,7 @@ cost_tuplesort(Cost *startup_cost, Cost *run_cost,
         *
         * Assume about N log2 N comparisons
         */
        startup_cost += comparison_cost * tuples * LOG2(tuples);
        *startup_cost = comparison_cost * tuples * LOG2(tuples);

        /* Disk costs */

@@ -1759,7 +1744,7 @@ cost_tuplesort(Cost *startup_cost, Cost *run_cost,
            log_runs = 1.0;
        npageaccesses = 2.0 * npages * log_runs;
        /* Assume 3/4ths of accesses are sequential, 1/4th are not */
        startup_cost += npageaccesses *
        *startup_cost += npageaccesses *
            (seq_page_cost * 0.75 + random_page_cost * 0.25);
    }
    else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)

@@ -1770,12 +1755,12 @@ cost_tuplesort(Cost *startup_cost, Cost *run_cost,
         * factor is a bit higher than for quicksort. Tweak it so that the
         * cost curve is continuous at the crossover point.
         */
        startup_cost += comparison_cost * tuples * LOG2(2.0 * output_tuples);
        *startup_cost = comparison_cost * tuples * LOG2(2.0 * output_tuples);
    }
    else
    {
        /* We'll use plain quicksort on all the input tuples */
        startup_cost += comparison_cost * tuples * LOG2(tuples);
        *startup_cost = comparison_cost * tuples * LOG2(tuples);
    }

    /*

@@ -1786,8 +1771,143 @@ cost_tuplesort(Cost *startup_cost, Cost *run_cost,
     * here --- the upper LIMIT will pro-rate the run cost so we'd be double
     * counting the LIMIT otherwise.
     */
    run_cost += cpu_operator_cost * tuples;
    *run_cost = cpu_operator_cost * tuples;
}

/*
 * cost_incremental_sort
 * 	Determines and returns the cost of sorting a relation incrementally, when
 *	the input path is presorted by a prefix of the pathkeys.
 *
 * 'presorted_keys' is the number of leading pathkeys by which the input path
 * is sorted.
 *
 * We estimate the number of groups into which the relation is divided by the
 * leading pathkeys, and then calculate the cost of sorting a single group
 * with tuplesort using cost_tuplesort().
 */
void
cost_incremental_sort(Path *path,
                      PlannerInfo *root, List *pathkeys, int presorted_keys,
                      Cost input_startup_cost, Cost input_total_cost,
                      double input_tuples, int width, Cost comparison_cost, int sort_mem,
                      double limit_tuples)
{
    Cost startup_cost = 0,
         run_cost = 0,
         input_run_cost = input_total_cost - input_startup_cost;
    double group_tuples,
           input_groups;
    Cost group_startup_cost,
         group_run_cost,
         group_input_run_cost;
    List *presortedExprs = NIL;
    ListCell *l;
    int i = 0;

    Assert(presorted_keys != 0);

    /*
     * We want to be sure the cost of a sort is never estimated as zero, even
     * if passed-in tuple count is zero. Besides, mustn't do log(0)...
     */
    if (input_tuples < 2.0)
        input_tuples = 2.0;

    /* Extract presorted keys as list of expressions */
    foreach(l, pathkeys)
    {
        PathKey *key = (PathKey *) lfirst(l);
        EquivalenceMember *member = (EquivalenceMember *)
            linitial(key->pk_eclass->ec_members);

        presortedExprs = lappend(presortedExprs, member->em_expr);

        i++;
        if (i >= presorted_keys)
            break;
    }

    /* Estimate number of groups with equal presorted keys */
    input_groups = estimate_num_groups(root, presortedExprs, input_tuples, NULL);
    group_tuples = input_tuples / input_groups;
    group_input_run_cost = input_run_cost / input_groups;

    /*
     * Estimate average cost of sorting of one group where presorted keys are
     * equal. Incremental sort is sensitive to distribution of tuples to the
     * groups, where we're relying on quite rough assumptions. Thus, we're
     * pessimistic about incremental sort performance and increase its average
     * group size by half.
     */
    cost_tuplesort(&group_startup_cost, &group_run_cost,
                   1.5 * group_tuples, width, comparison_cost, sort_mem,
                   limit_tuples);

    /*
     * Startup cost of incremental sort is the startup cost of its first group
     * plus the cost of its input.
     */
    startup_cost += group_startup_cost
        + input_startup_cost + group_input_run_cost;

    /*
     * After we started producing tuples from the first group, the cost of
     * producing all the tuples is given by the cost to finish processing this
     * group, plus the total cost to process the remaining groups, plus the
     * remaining cost of input.
     */
    run_cost += group_run_cost
        + (group_run_cost + group_startup_cost) * (input_groups - 1)
        + group_input_run_cost * (input_groups - 1);

    /*
     * Incremental sort adds some overhead by itself. Firstly, it has to
     * detect the sort groups. This is roughly equal to one extra copy and
     * comparison per tuple. Secondly, it has to reset the tuplesort context
     * for every group.
     */
    run_cost += (cpu_tuple_cost + comparison_cost) * input_tuples;
    run_cost += 2.0 * cpu_tuple_cost * input_groups;

    path->rows = input_tuples;
    path->startup_cost = startup_cost;
    path->total_cost = startup_cost + run_cost;
}
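Reading the arithmetic above back as formulas: with G estimated prefix-key groups, N input tuples, and (S_group, R_group) the cost_tuplesort() startup/run costs for one group of 1.5 * N / G tuples, the function computes

    startup_cost = S_group + S_input + R_input / G

    run_cost = R_group
             + (G - 1) * (S_group + R_group)
             + (G - 1) * (R_input / G)
             + (cpu_tuple_cost + comparison_cost) * N
             + 2 * cpu_tuple_cost * G

so the startup cost covers reading and sorting just the first group, and the last two terms charge for group-boundary detection (one extra copy and comparison per tuple) and per-group tuplesort resets.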

/*
 * cost_sort
 *	Determines and returns the cost of sorting a relation, including
 *	the cost of reading the input data.
 *
 * NOTE: some callers currently pass NIL for pathkeys because they
 * can't conveniently supply the sort keys. Since this routine doesn't
 * currently do anything with pathkeys anyway, that doesn't matter...
 * but if it ever does, it should react gracefully to lack of key data.
 * (Actually, the thing we'd most likely be interested in is just the number
 * of sort keys, which all callers *could* supply.)
 */
void
cost_sort(Path *path, PlannerInfo *root,
          List *pathkeys, Cost input_cost, double tuples, int width,
          Cost comparison_cost, int sort_mem,
          double limit_tuples)
{
    Cost startup_cost;
    Cost run_cost;

    cost_tuplesort(&startup_cost, &run_cost,
                   tuples, width,
                   comparison_cost, sort_mem,
                   limit_tuples);

    if (!enable_sort)
        startup_cost += disable_cost;

    startup_cost += input_cost;

    path->rows = tuples;
    path->startup_cost = startup_cost;
    path->total_cost = startup_cost + run_cost;
}
src/backend/optimizer/path/pathkeys.c

@@ -334,6 +334,60 @@ pathkeys_contained_in(List *keys1, List *keys2)
    return false;
}

/*
 * pathkeys_count_contained_in
 *	Same as pathkeys_contained_in, but also sets length of longest
 *	common prefix of keys1 and keys2.
 */
bool
pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
{
    int n = 0;
    ListCell *key1,
             *key2;

    /*
     * See if we can avoid looping through both lists. This optimization
     * gains us several percent in planning time in a worst-case test.
     */
    if (keys1 == keys2)
    {
        *n_common = list_length(keys1);
        return true;
    }
    else if (keys1 == NIL)
    {
        *n_common = 0;
        return true;
    }
    else if (keys2 == NIL)
    {
        *n_common = 0;
        return false;
    }

    /*
     * If both lists are non-empty, iterate through both to find out how many
     * items are shared.
     */
    forboth(key1, keys1, key2, keys2)
    {
        PathKey *pathkey1 = (PathKey *) lfirst(key1);
        PathKey *pathkey2 = (PathKey *) lfirst(key2);

        if (pathkey1 != pathkey2)
        {
            *n_common = n;
            return false;
        }
        n++;
    }

    /* If we ended with a null value, then we've processed the whole list. */
    *n_common = n;
    return (key1 == NULL);
}

/*
 * get_cheapest_path_for_pathkeys
 *	Find the cheapest path (according to the specified criterion) that

@@ -1786,26 +1840,26 @@ right_merge_direction(PlannerInfo *root, PathKey *pathkey)
 * Count the number of pathkeys that are useful for meeting the
 * query's requested output ordering.
 *
 * Unlike merge pathkeys, this is an all-or-nothing affair: it does us
 * no good to order by just the first key(s) of the requested ordering.
 * So the result is always either 0 or list_length(root->query_pathkeys).
 * Because we have the possibility of incremental sort, a prefix list of
 * keys is potentially useful for improving the performance of the requested
 * ordering. Thus we return 0, if no valuable keys are found, or the number
 * of leading keys shared by the list and the requested ordering.
 */
static int
pathkeys_useful_for_ordering(PlannerInfo *root, List *pathkeys)
{
    int n_common_pathkeys;

    if (root->query_pathkeys == NIL)
        return 0; /* no special ordering requested */

    if (pathkeys == NIL)
        return 0; /* unordered path */

    if (pathkeys_contained_in(root->query_pathkeys, pathkeys))
    {
        /* It's useful ... or at least the first N keys are */
        return list_length(root->query_pathkeys);
    }
    (void) pathkeys_count_contained_in(root->query_pathkeys, pathkeys,
                                       &n_common_pathkeys);

    return 0; /* path ordering not useful */
    return n_common_pathkeys;
}

/*
src/backend/optimizer/plan/createplan.c

@@ -98,6 +98,8 @@ static Plan *create_projection_plan(PlannerInfo *root,
                                    int flags);
static Plan *inject_projection_plan(Plan *subplan, List *tlist, bool parallel_safe);
static Sort *create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags);
static IncrementalSort *create_incrementalsort_plan(PlannerInfo *root,
                                                    IncrementalSortPath *best_path, int flags);
static Group *create_group_plan(PlannerInfo *root, GroupPath *best_path);
static Unique *create_upper_unique_plan(PlannerInfo *root, UpperUniquePath *best_path,
                                        int flags);

@@ -244,6 +246,10 @@ static MergeJoin *make_mergejoin(List *tlist,
static Sort *make_sort(Plan *lefttree, int numCols,
                       AttrNumber *sortColIdx, Oid *sortOperators,
                       Oid *collations, bool *nullsFirst);
static IncrementalSort *make_incrementalsort(Plan *lefttree,
                                             int numCols, int nPresortedCols,
                                             AttrNumber *sortColIdx, Oid *sortOperators,
                                             Oid *collations, bool *nullsFirst);
static Plan *prepare_sort_from_pathkeys(Plan *lefttree, List *pathkeys,
                                        Relids relids,
                                        const AttrNumber *reqColIdx,

@@ -258,6 +264,8 @@ static EquivalenceMember *find_ec_member_for_tle(EquivalenceClass *ec,
                                                 Relids relids);
static Sort *make_sort_from_pathkeys(Plan *lefttree, List *pathkeys,
                                     Relids relids);
static IncrementalSort *make_incrementalsort_from_pathkeys(Plan *lefttree,
                                                           List *pathkeys, Relids relids, int nPresortedCols);
static Sort *make_sort_from_groupcols(List *groupcls,
                                      AttrNumber *grpColIdx,
                                      Plan *lefttree);

@@ -460,6 +468,11 @@ create_plan_recurse(PlannerInfo *root, Path *best_path, int flags)
                                         (SortPath *) best_path,
                                         flags);
            break;
        case T_IncrementalSort:
            plan = (Plan *) create_incrementalsort_plan(root,
                                                        (IncrementalSortPath *) best_path,
                                                        flags);
            break;
        case T_Group:
            plan = (Plan *) create_group_plan(root,
                                              (GroupPath *) best_path);

@@ -1994,6 +2007,32 @@ create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags)
    return plan;
}

/*
 * create_incrementalsort_plan
 *
 *	  Do the same as create_sort_plan, but create IncrementalSort plan.
 */
static IncrementalSort *
create_incrementalsort_plan(PlannerInfo *root, IncrementalSortPath *best_path,
                            int flags)
{
    IncrementalSort *plan;
    Plan *subplan;

    /* See comments in create_sort_plan() above */
    subplan = create_plan_recurse(root, best_path->spath.subpath,
                                  flags | CP_SMALL_TLIST);
    plan = make_incrementalsort_from_pathkeys(subplan,
                                              best_path->spath.path.pathkeys,
                                              IS_OTHER_REL(best_path->spath.subpath->parent) ?
                                              best_path->spath.path.parent->relids : NULL,
                                              best_path->nPresortedCols);

    copy_generic_path_info(&plan->sort.plan, (Path *) best_path);

    return plan;
}

/*
 * create_group_plan
 *

@@ -5090,6 +5129,12 @@ label_sort_with_costsize(PlannerInfo *root, Sort *plan, double limit_tuples)
    Plan *lefttree = plan->plan.lefttree;
    Path sort_path; /* dummy for result of cost_sort */

    /*
     * This function shouldn't have to deal with IncrementalSort plans because
     * they are only created from corresponding Path nodes.
     */
    Assert(IsA(plan, Sort));

    cost_sort(&sort_path, root, NIL,
              lefttree->total_cost,
              lefttree->plan_rows,

@@ -5677,9 +5722,12 @@ make_sort(Plan *lefttree, int numCols,
          AttrNumber *sortColIdx, Oid *sortOperators,
          Oid *collations, bool *nullsFirst)
{
    Sort *node = makeNode(Sort);
    Plan *plan = &node->plan;
    Sort *node;
    Plan *plan;

    node = makeNode(Sort);

    plan = &node->plan;
    plan->targetlist = lefttree->targetlist;
    plan->qual = NIL;
    plan->lefttree = lefttree;

@@ -5693,6 +5741,37 @@ make_sort(Plan *lefttree, int numCols,
    return node;
}

/*
 * make_incrementalsort --- basic routine to build an IncrementalSort plan node
 *
 * Caller must have built the sortColIdx, sortOperators, collations, and
 * nullsFirst arrays already.
 */
static IncrementalSort *
make_incrementalsort(Plan *lefttree, int numCols, int nPresortedCols,
                     AttrNumber *sortColIdx, Oid *sortOperators,
                     Oid *collations, bool *nullsFirst)
{
    IncrementalSort *node;
    Plan *plan;

    node = makeNode(IncrementalSort);

    plan = &node->sort.plan;
    plan->targetlist = lefttree->targetlist;
    plan->qual = NIL;
    plan->lefttree = lefttree;
    plan->righttree = NULL;
    node->nPresortedCols = nPresortedCols;
    node->sort.numCols = numCols;
    node->sort.sortColIdx = sortColIdx;
    node->sort.sortOperators = sortOperators;
    node->sort.collations = collations;
    node->sort.nullsFirst = nullsFirst;

    return node;
}

/*
 * prepare_sort_from_pathkeys
 *	Prepare to sort according to given pathkeys

@@ -6039,6 +6118,42 @@ make_sort_from_pathkeys(Plan *lefttree, List *pathkeys, Relids relids)
                     collations, nullsFirst);
}

/*
 * make_incrementalsort_from_pathkeys
 *	Create sort plan to sort according to given pathkeys
 *
 * 'lefttree' is the node which yields input tuples
 * 'pathkeys' is the list of pathkeys by which the result is to be sorted
 * 'relids' is the set of relations required by prepare_sort_from_pathkeys()
 * 'nPresortedCols' is the number of presorted columns in input tuples
 */
static IncrementalSort *
make_incrementalsort_from_pathkeys(Plan *lefttree, List *pathkeys,
                                   Relids relids, int nPresortedCols)
{
    int numsortkeys;
    AttrNumber *sortColIdx;
    Oid *sortOperators;
    Oid *collations;
    bool *nullsFirst;

    /* Compute sort column info, and adjust lefttree as needed */
    lefttree = prepare_sort_from_pathkeys(lefttree, pathkeys,
                                          relids,
                                          NULL,
                                          false,
                                          &numsortkeys,
                                          &sortColIdx,
                                          &sortOperators,
                                          &collations,
                                          &nullsFirst);

    /* Now build the Sort node */
    return make_incrementalsort(lefttree, numsortkeys, nPresortedCols,
                                sortColIdx, sortOperators,
                                collations, nullsFirst);
}

/*
 * make_sort_from_sortclauses
 *	Create sort plan to sort according to given sortclauses

@@ -6774,6 +6889,7 @@ is_projection_capable_path(Path *path)
        case T_Hash:
        case T_Material:
        case T_Sort:
        case T_IncrementalSort:
        case T_Unique:
        case T_SetOp:
        case T_LockRows:
src/backend/optimizer/plan/planner.c

@@ -4924,13 +4924,16 @@ create_distinct_paths(PlannerInfo *root,
 * Build a new upperrel containing Paths for ORDER BY evaluation.
 *
 * All paths in the result must satisfy the ORDER BY ordering.
 * The only new path we need consider is an explicit sort on the
 * cheapest-total existing path.
 * The only new paths we need consider are an explicit full sort
 * and incremental sort on the cheapest-total existing path.
 *
 * input_rel: contains the source-data Paths
 * target: the output tlist the result Paths must emit
 * limit_tuples: estimated bound on the number of output tuples,
 *	or -1 if no LIMIT or couldn't estimate
 *
 * XXX This only looks at sort_pathkeys. I wonder if it needs to look at the
 * other pathkeys (grouping, ...) like generate_useful_gather_paths.
 */
static RelOptInfo *
create_ordered_paths(PlannerInfo *root,

@@ -4964,29 +4967,77 @@ create_ordered_paths(PlannerInfo *root,

    foreach(lc, input_rel->pathlist)
    {
        Path *path = (Path *) lfirst(lc);
        Path *input_path = (Path *) lfirst(lc);
        Path *sorted_path = input_path;
        bool is_sorted;
        int presorted_keys;

        is_sorted = pathkeys_contained_in(root->sort_pathkeys,
                                          path->pathkeys);
        if (path == cheapest_input_path || is_sorted)
        is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
                                                input_path->pathkeys, &presorted_keys);

        if (is_sorted)
        {
            if (!is_sorted)
            /* Use the input path as is, but add a projection step if needed */
            if (sorted_path->pathtarget != target)
                sorted_path = apply_projection_to_path(root, ordered_rel,
                                                       sorted_path, target);

            add_path(ordered_rel, sorted_path);
        }
        else
        {
            /*
             * Try adding an explicit sort, but only to the cheapest total path
             * since a full sort should generally add the same cost to all
             * paths.
             */
            if (input_path == cheapest_input_path)
            {
                /* An explicit sort here can take advantage of LIMIT */
                path = (Path *) create_sort_path(root,
                                                 ordered_rel,
                                                 path,
                                                 root->sort_pathkeys,
                                                 limit_tuples);
                /*
                 * Sort the cheapest input path. An explicit sort here can
                 * take advantage of LIMIT.
                 */
                sorted_path = (Path *) create_sort_path(root,
                                                        ordered_rel,
                                                        input_path,
                                                        root->sort_pathkeys,
                                                        limit_tuples);
                /* Add projection step if needed */
                if (sorted_path->pathtarget != target)
                    sorted_path = apply_projection_to_path(root, ordered_rel,
                                                           sorted_path, target);

                add_path(ordered_rel, sorted_path);
            }

            /* Add projection step if needed */
            if (path->pathtarget != target)
                path = apply_projection_to_path(root, ordered_rel,
                                                path, target);
            /*
             * If incremental sort is enabled, then try it as well. Unlike with
             * regular sorts, we can't just look at the cheapest path, because
             * the cost of incremental sort depends on how well presorted the
             * path is. Additionally incremental sort may enable a cheaper
             * startup path to win out despite higher total cost.
             */
            if (!enable_incrementalsort)
                continue;

            add_path(ordered_rel, path);
            /* Likewise, if the path can't be used for incremental sort. */
            if (!presorted_keys)
                continue;

            /* Also consider incremental sort. */
            sorted_path = (Path *) create_incremental_sort_path(root,
                                                                ordered_rel,
                                                                input_path,
                                                                root->sort_pathkeys,
                                                                presorted_keys,
                                                                limit_tuples);

            /* Add projection step if needed */
            if (sorted_path->pathtarget != target)
                sorted_path = apply_projection_to_path(root, ordered_rel,
                                                       sorted_path, target);

            add_path(ordered_rel, sorted_path);
        }
    }
src/backend/optimizer/plan/setrefs.c

@@ -678,6 +678,7 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)

        case T_Material:
        case T_Sort:
        case T_IncrementalSort:
        case T_Unique:
        case T_SetOp:

src/backend/optimizer/plan/subselect.c

@@ -2688,6 +2688,7 @@ finalize_plan(PlannerInfo *root, Plan *plan,
        case T_Hash:
        case T_Material:
        case T_Sort:
        case T_IncrementalSort:
        case T_Unique:
        case T_SetOp:
        case T_Group:
src/backend/optimizer/util/pathnode.c

@@ -2753,6 +2753,57 @@ create_set_projection_path(PlannerInfo *root,
    return pathnode;
}

/*
 * create_incremental_sort_path
 *	  Creates a pathnode that represents performing an incremental sort.
 *
 * 'rel' is the parent relation associated with the result
 * 'subpath' is the path representing the source of data
 * 'pathkeys' represents the desired sort order
 * 'presorted_keys' is the number of keys by which the input path is
 *		already sorted
 * 'limit_tuples' is the estimated bound on the number of output tuples,
 *		or -1 if no LIMIT or couldn't estimate
 */
SortPath *
create_incremental_sort_path(PlannerInfo *root,
                             RelOptInfo *rel,
                             Path *subpath,
                             List *pathkeys,
                             int presorted_keys,
                             double limit_tuples)
{
    IncrementalSortPath *sort = makeNode(IncrementalSortPath);
    SortPath *pathnode = &sort->spath;

    pathnode->path.pathtype = T_IncrementalSort;
    pathnode->path.parent = rel;
    /* Sort doesn't project, so use source path's pathtarget */
    pathnode->path.pathtarget = subpath->pathtarget;
    /* For now, assume we are above any joins, so no parameterization */
    pathnode->path.param_info = NULL;
    pathnode->path.parallel_aware = false;
    pathnode->path.parallel_safe = rel->consider_parallel &&
        subpath->parallel_safe;
    pathnode->path.parallel_workers = subpath->parallel_workers;
    pathnode->path.pathkeys = pathkeys;

    pathnode->subpath = subpath;

    cost_incremental_sort(&pathnode->path,
                          root, pathkeys, presorted_keys,
                          subpath->startup_cost,
                          subpath->total_cost,
                          subpath->rows,
                          subpath->pathtarget->width,
                          0.0, /* XXX comparison_cost shouldn't be 0? */
                          work_mem, limit_tuples);

    sort->nPresortedCols = presorted_keys;

    return pathnode;
}

/*
 * create_sort_path
 *	  Creates a pathnode that represents performing an explicit sort.
src/backend/utils/misc/guc.c

@@ -991,6 +991,15 @@ static struct config_bool ConfigureNamesBool[] =
        true,
        NULL, NULL, NULL
    },
    {
        {"enable_incrementalsort", PGC_USERSET, QUERY_TUNING_METHOD,
            gettext_noop("Enables the planner's use of incremental sort steps."),
            NULL
        },
        &enable_incrementalsort,
        true,
        NULL, NULL, NULL
    },
    {
        {"enable_hashagg", PGC_USERSET, QUERY_TUNING_METHOD,
            gettext_noop("Enables the planner's use of hashed aggregation plans."),

src/backend/utils/misc/postgresql.conf.sample

@@ -360,6 +360,7 @@
#enable_parallel_append = on
#enable_seqscan = on
#enable_sort = on
#enable_incrementalsort = on
#enable_tidscan = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
@ -125,6 +125,16 @@
|
||||
#define PARALLEL_SORT(state) ((state)->shared == NULL ? 0 : \
|
||||
(state)->worker >= 0 ? 1 : 2)
|
||||
|
||||
/*
|
||||
* Initial size of memtuples array. We're trying to select this size so that
|
||||
* array doesn't exceed ALLOCSET_SEPARATE_THRESHOLD and so that the overhead of
|
||||
* allocation might possibly be lowered. However, we don't consider array sizes
|
||||
* less than 1024.
|
||||
*
|
||||
*/
|
||||
#define INITIAL_MEMTUPSIZE Max(1024, \
|
||||
ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1)
|
||||
|
||||
/* GUC variables */
|
||||
#ifdef TRACE_SORT
|
||||
bool trace_sort = false;
|
||||
@ -241,6 +251,14 @@ struct Tuplesortstate
|
||||
int64 allowedMem; /* total memory allowed, in bytes */
|
||||
int maxTapes; /* number of tapes (Knuth's T) */
|
||||
int tapeRange; /* maxTapes-1 (Knuth's P) */
|
||||
int64 maxSpace; /* maximum amount of space occupied among sort
|
||||
* of groups, either in-memory or on-disk */
|
||||
bool isMaxSpaceDisk; /* true when maxSpace is value for on-disk
|
||||
* space, false when it's value for in-memory
|
||||
* space */
|
||||
TupSortStatus maxSpaceStatus; /* sort status when maxSpace was reached */
|
||||
MemoryContext maincontext; /* memory context for tuple sort metadata that
|
||||
* persists across multiple batches */
|
||||
MemoryContext sortcontext; /* memory context holding most sort data */
|
||||
MemoryContext tuplecontext; /* sub-context of sortcontext for tuple data */
|
||||
LogicalTapeSet *tapeset; /* logtape.c object for tapes in a temp file */
|
||||
@ -591,6 +609,7 @@ struct Sharedsort
static Tuplesortstate *tuplesort_begin_common(int workMem,
SortCoordinate coordinate,
bool randomAccess);
static void tuplesort_begin_batch(Tuplesortstate *state);
static void puttuple_common(Tuplesortstate *state, SortTuple *tuple);
static bool consider_abort_common(Tuplesortstate *state);
static void inittapes(Tuplesortstate *state, bool mergeruns);
@ -647,6 +666,8 @@ static void worker_freeze_result_tape(Tuplesortstate *state);
static void worker_nomergeruns(Tuplesortstate *state);
static void leader_takeover_tapes(Tuplesortstate *state);
static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup);
static void tuplesort_free(Tuplesortstate *state);
static void tuplesort_updatemax(Tuplesortstate *state);

/*
* Special versions of qsort just for SortTuple objects. qsort_tuple() sorts
@ -682,8 +703,8 @@ tuplesort_begin_common(int workMem, SortCoordinate coordinate,
bool randomAccess)
{
Tuplesortstate *state;
MemoryContext maincontext;
MemoryContext sortcontext;
MemoryContext tuplecontext;
MemoryContext oldcontext;

/* See leader_takeover_tapes() remarks on randomAccess support */
@ -691,31 +712,31 @@ tuplesort_begin_common(int workMem, SortCoordinate coordinate,
elog(ERROR, "random access disallowed under parallel sort");

/*
* Create a working memory context for this sort operation. All data
* needed by the sort will live inside this context.
* Memory context surviving tuplesort_reset. This memory context holds
* data that is useful to keep while sorting multiple similar batches.
*/
sortcontext = AllocSetContextCreate(CurrentMemoryContext,
maincontext = AllocSetContextCreate(CurrentMemoryContext,
"TupleSort main",
ALLOCSET_DEFAULT_SIZES);

/*
* Caller tuple (e.g. IndexTuple) memory context.
*
* A dedicated child context used exclusively for caller passed tuples
* eases memory management. Resetting at key points reduces
* fragmentation. Note that the memtuples array of SortTuples is allocated
* in the parent context, not this context, because there is no need to
* free memtuples early.
* Create a working memory context for one sort operation. The content of
* this context is deleted by tuplesort_reset.
*/
tuplecontext = AllocSetContextCreate(sortcontext,
"Caller tuples",
ALLOCSET_DEFAULT_SIZES);
sortcontext = AllocSetContextCreate(maincontext,
"TupleSort sort",
ALLOCSET_DEFAULT_SIZES);

/*
* Make the Tuplesortstate within the per-sort context. This way, we
* Additionally, a working memory context for tuples is set up in
* tuplesort_begin_batch.
*/

/*
* Make the Tuplesortstate within the per-sortstate context. This way, we
* don't need a separate pfree() operation for it at shutdown.
*/
oldcontext = MemoryContextSwitchTo(sortcontext);
oldcontext = MemoryContextSwitchTo(maincontext);

state = (Tuplesortstate *) palloc0(sizeof(Tuplesortstate));

@ -724,11 +745,8 @@ tuplesort_begin_common(int workMem, SortCoordinate coordinate,
pg_rusage_init(&state->ru_start);
#endif

state->status = TSS_INITIAL;
state->randomAccess = randomAccess;
state->bounded = false;
state->tuples = true;
state->boundUsed = false;

/*
* workMem is forced to be at least 64KB, the current minimum valid value
@ -737,38 +755,21 @@ tuplesort_begin_common(int workMem, SortCoordinate coordinate,
* with very little memory.
*/
state->allowedMem = Max(workMem, 64) * (int64) 1024;
state->availMem = state->allowedMem;
state->sortcontext = sortcontext;
state->tuplecontext = tuplecontext;
state->tapeset = NULL;

state->memtupcount = 0;
state->maincontext = maincontext;

/*
* Initial size of array must be more than ALLOCSET_SEPARATE_THRESHOLD;
* see comments in grow_memtuples().
*/
state->memtupsize = Max(1024,
ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1);

state->growmemtuples = true;
state->slabAllocatorUsed = false;
state->memtuples = (SortTuple *) palloc(state->memtupsize * sizeof(SortTuple));

USEMEM(state, GetMemoryChunkSpace(state->memtuples));

/* workMem must be large enough for the minimal memtuples array */
if (LACKMEM(state))
elog(ERROR, "insufficient memory allowed for sort");

state->currentRun = 0;
state->memtupsize = INITIAL_MEMTUPSIZE;
state->memtuples = NULL;

/*
* maxTapes, tapeRange, and Algorithm D variables will be initialized by
* inittapes(), if needed
* After all of the other non-parallel-related state, we set up all of the
* state needed for each batch.
*/

state->result_tape = -1; /* flag that result tape has not been formed */
tuplesort_begin_batch(state);

/*
* Initialize parallel-related state based on coordination information
@ -802,6 +803,77 @@ tuplesort_begin_common(int workMem, SortCoordinate coordinate,
return state;
}
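
To keep the new arrangement straight, here is a sketch of the memory context hierarchy that the code above creates, together with tuplesort_begin_batch below:

    /*
     * maincontext ("TupleSort main")
     *   - survives tuplesort_reset; holds the Tuplesortstate and metadata
     *   +- sortcontext ("TupleSort sort")
     *        - bulk per-batch sort data; reset between batches
     *        +- tuplecontext ("Caller tuples")
     *             - caller-passed tuples; recreated by tuplesort_begin_batch
     */
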
/*
* tuplesort_begin_batch
*
* Set up, or reset, all state needed for processing a new set of tuples
* with this sort state. Called both from tuplesort_begin_common (the first
* time sorting with this sort state) and tuplesort_reset (for subsequent
* usages).
*/
static void
tuplesort_begin_batch(Tuplesortstate *state)
{
MemoryContext oldcontext;

oldcontext = MemoryContextSwitchTo(state->maincontext);

/*
* Caller tuple (e.g. IndexTuple) memory context.
*
* A dedicated child context used exclusively for caller passed tuples
* eases memory management. Resetting at key points reduces
* fragmentation. Note that the memtuples array of SortTuples is allocated
* in the parent context, not this context, because there is no need to
* free memtuples early.
*/
state->tuplecontext = AllocSetContextCreate(state->sortcontext,
"Caller tuples",
ALLOCSET_DEFAULT_SIZES);

state->status = TSS_INITIAL;
state->bounded = false;
state->boundUsed = false;

state->availMem = state->allowedMem;

state->tapeset = NULL;

state->memtupcount = 0;

/*
* Initial size of array must be more than ALLOCSET_SEPARATE_THRESHOLD;
* see comments in grow_memtuples().
*/
state->growmemtuples = true;
state->slabAllocatorUsed = false;
if (state->memtuples != NULL && state->memtupsize != INITIAL_MEMTUPSIZE)
{
pfree(state->memtuples);
state->memtuples = NULL;
state->memtupsize = INITIAL_MEMTUPSIZE;
}
if (state->memtuples == NULL)
{
state->memtuples = (SortTuple *) palloc(state->memtupsize * sizeof(SortTuple));
USEMEM(state, GetMemoryChunkSpace(state->memtuples));
}

/* workMem must be large enough for the minimal memtuples array */
if (LACKMEM(state))
elog(ERROR, "insufficient memory allowed for sort");

state->currentRun = 0;

/*
* maxTapes, tapeRange, and Algorithm D variables will be initialized by
* inittapes(), if needed
*/

state->result_tape = -1; /* flag that result tape has not been formed */

MemoryContextSwitchTo(oldcontext);
}

Tuplesortstate *
tuplesort_begin_heap(TupleDesc tupDesc,
int nkeys, AttrNumber *attNums,
@ -814,7 +886,7 @@ tuplesort_begin_heap(TupleDesc tupDesc,
MemoryContext oldcontext;
int i;

oldcontext = MemoryContextSwitchTo(state->sortcontext);
oldcontext = MemoryContextSwitchTo(state->maincontext);

AssertArg(nkeys > 0);

@ -890,7 +962,7 @@ tuplesort_begin_cluster(TupleDesc tupDesc,

Assert(indexRel->rd_rel->relam == BTREE_AM_OID);

oldcontext = MemoryContextSwitchTo(state->sortcontext);
oldcontext = MemoryContextSwitchTo(state->maincontext);

#ifdef TRACE_SORT
if (trace_sort)
@ -985,7 +1057,7 @@ tuplesort_begin_index_btree(Relation heapRel,
MemoryContext oldcontext;
int i;

oldcontext = MemoryContextSwitchTo(state->sortcontext);
oldcontext = MemoryContextSwitchTo(state->maincontext);

#ifdef TRACE_SORT
if (trace_sort)
@ -1063,7 +1135,7 @@ tuplesort_begin_index_hash(Relation heapRel,
randomAccess);
MemoryContext oldcontext;

oldcontext = MemoryContextSwitchTo(state->sortcontext);
oldcontext = MemoryContextSwitchTo(state->maincontext);

#ifdef TRACE_SORT
if (trace_sort)
@ -1106,7 +1178,7 @@ tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
int16 typlen;
bool typbyval;

oldcontext = MemoryContextSwitchTo(state->sortcontext);
oldcontext = MemoryContextSwitchTo(state->maincontext);

#ifdef TRACE_SORT
if (trace_sort)
@ -1224,16 +1296,23 @@ tuplesort_set_bound(Tuplesortstate *state, int64 bound)
}

/*
* tuplesort_end
* tuplesort_used_bound
*
* Release resources and clean up.
*
* NOTE: after calling this, any pointers returned by tuplesort_getXXX are
* pointing to garbage. Be careful not to attempt to use or free such
* pointers afterwards!
* Allow callers to find out if the sort state was able to use a bound.
*/
void
tuplesort_end(Tuplesortstate *state)
bool
tuplesort_used_bound(Tuplesortstate *state)
{
return state->boundUsed;
}
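
A small, hypothetical caller sketch for the new accessor: after a bounded (top-N) sort, the caller can learn whether the bound actually took effect. The `limit` variable and the caller-side reaction are assumptions, not part of this API.

    tuplesort_set_bound(state, limit);
    /* ... feed all input tuples ... */
    tuplesort_performsort(state);
    if (tuplesort_used_bound(state))
    {
        /* the bounded heap discarded everything beyond `limit` tuples */
    }
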
/*
* tuplesort_free
*
* Internal routine for freeing resources of tuplesort.
*/
static void
tuplesort_free(Tuplesortstate *state)
{
/* context swap probably not needed, but let's be safe */
MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
@ -1291,10 +1370,104 @@ tuplesort_end(Tuplesortstate *state)
MemoryContextSwitchTo(oldcontext);

/*
* Free the per-sort memory context, thereby releasing all working memory,
* including the Tuplesortstate struct itself.
* Free the per-sort memory context, thereby releasing all working memory.
*/
MemoryContextDelete(state->sortcontext);
MemoryContextReset(state->sortcontext);
}

/*
* tuplesort_end
*
* Release resources and clean up.
*
* NOTE: after calling this, any pointers returned by tuplesort_getXXX are
* pointing to garbage. Be careful not to attempt to use or free such
* pointers afterwards!
*/
void
tuplesort_end(Tuplesortstate *state)
{
tuplesort_free(state);

/*
* Free the main memory context, including the Tuplesortstate struct
* itself.
*/
MemoryContextDelete(state->maincontext);
}

/*
* tuplesort_updatemax
*
* Update maximum resource usage statistics.
*/
static void
tuplesort_updatemax(Tuplesortstate *state)
{
int64 spaceUsed;
bool isSpaceDisk;

/*
* Note: it might seem we should provide both memory and disk usage for a
* disk-based sort. However, the current code doesn't track memory space
* accurately once we have begun to return tuples to the caller (since we
* don't account for pfree's the caller is expected to do), so we cannot
* rely on availMem in a disk sort. This does not seem worth the overhead
* to fix. Is it worth creating an API for the memory context code to
* tell us how much is actually used in sortcontext?
*/
if (state->tapeset)
{
isSpaceDisk = true;
spaceUsed = LogicalTapeSetBlocks(state->tapeset) * BLCKSZ;
}
else
{
isSpaceDisk = false;
spaceUsed = state->allowedMem - state->availMem;
}

/*
* The sort spills data to disk when the data does not fit in main memory,
* which is why we treat space used on disk as more significant for
* resource tracking than space used in memory. Note that the same tuple
* set may occupy less space on disk than in memory, due to the more
* compact on-disk representation.
*/
if ((isSpaceDisk && !state->isMaxSpaceDisk) ||
(isSpaceDisk == state->isMaxSpaceDisk && spaceUsed > state->maxSpace))
{
state->maxSpace = spaceUsed;
state->isMaxSpaceDisk = isSpaceDisk;
state->maxSpaceStatus = state->status;
}
}
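
To illustrate the precedence rule encoded in the condition above, here is an assumed sequence of batches run through one sort state (the numbers are invented for the example):

    /*
     * batch 1: in memory, 6 MB  -> maxSpace = 6 MB, isMaxSpaceDisk = false
     * batch 2: in memory, 4 MB  -> unchanged (same kind, 4 MB < 6 MB)
     * batch 3: on disk,   3 MB  -> maxSpace = 3 MB, isMaxSpaceDisk = true
     *                              (disk displaces memory regardless of size)
     */
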
/*
* tuplesort_reset
*
* Reset the tuplesort. All of its data is discarded, but the
* meta-information is kept. After tuplesort_reset, the tuplesort is ready
* to start a new sort. This avoids recreating tuple sort states (and saves
* resources) when sorting multiple small batches.
*/
void
tuplesort_reset(Tuplesortstate *state)
{
tuplesort_updatemax(state);
tuplesort_free(state);

/*
* After we've freed up per-batch memory, we reinitialize all of the state
* common to both the first batch and any subsequent batch.
*/
tuplesort_begin_batch(state);

state->lastReturnedTuple = NULL;
state->slabMemoryBegin = NULL;
state->slabMemoryEnd = NULL;
state->slabFreeHead = NULL;
}
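
Since tuplesort_reset is the key new entry point for Incremental Sort, here is a minimal sketch of the reuse pattern it enables, roughly one sorted batch per prefix group. fetch_group_tuple() and process_tuple() are assumed placeholder helpers and `slot` a caller-provided TupleTableSlot; only the tuplesort_* calls are real API.

    for (;;)
    {
        bool got_tuple = false;

        /* load one prefix group into the sort */
        while (fetch_group_tuple(slot))
        {
            tuplesort_puttupleslot(state, slot);
            got_tuple = true;
        }
        if (!got_tuple)
            break;

        tuplesort_performsort(state);
        while (tuplesort_gettupleslot(state, true, false, slot, NULL))
            process_tuple(slot);        /* emit one sorted group */

        /* drop per-batch memory, keep metadata for the next group */
        tuplesort_reset(state);
    }
    tuplesort_end(state);               /* frees maincontext and the state */
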
/*
@ -2591,8 +2764,7 @@ mergeruns(Tuplesortstate *state)
* Reset tuple memory. We've freed all the tuples that we previously
* allocated. We will use the slab allocator from now on.
*/
MemoryContextDelete(state->tuplecontext);
state->tuplecontext = NULL;
MemoryContextResetOnly(state->tuplecontext);

/*
* We no longer need a large memtuples array. (We will allocate a smaller
@ -2642,7 +2814,8 @@ mergeruns(Tuplesortstate *state)
* from each input tape.
*/
state->memtupsize = numInputTapes;
state->memtuples = (SortTuple *) palloc(numInputTapes * sizeof(SortTuple));
state->memtuples = (SortTuple *) MemoryContextAlloc(state->maincontext,
numInputTapes * sizeof(SortTuple));
USEMEM(state, GetMemoryChunkSpace(state->memtuples));

/*
@ -3138,18 +3311,15 @@ tuplesort_get_stats(Tuplesortstate *state,
* to fix. Is it worth creating an API for the memory context code to
* tell us how much is actually used in sortcontext?
*/
if (state->tapeset)
{
stats->spaceType = SORT_SPACE_TYPE_DISK;
stats->spaceUsed = LogicalTapeSetBlocks(state->tapeset) * (BLCKSZ / 1024);
}
else
{
stats->spaceType = SORT_SPACE_TYPE_MEMORY;
stats->spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
}
tuplesort_updatemax(state);

switch (state->status)
if (state->isMaxSpaceDisk)
stats->spaceType = SORT_SPACE_TYPE_DISK;
else
stats->spaceType = SORT_SPACE_TYPE_MEMORY;
stats->spaceUsed = (state->maxSpace + 1023) / 1024;

switch (state->maxSpaceStatus)
{
case TSS_SORTEDINMEM:
if (state->boundUsed)