
Change "long" numGroups fields to be Cardinality (i.e., double).

We've been nibbling away at removing uses of "long" for a long time,
since its width is platform-dependent.  Here's one more: change the
remaining "long" fields in Plan nodes to Cardinality, since the three
surviving examples all represent group-count estimates.  The upstream
planner code was converted to Cardinality some time ago; for example
the corresponding fields in Path nodes are type Cardinality, as are
the arguments of the make_foo_path functions.  Downstream in the
executor, it turns out that these all feed to the table-size argument
of BuildTupleHashTable.  Change that to "double" as well, and fix it
so that it safely clamps out-of-range values to the uint32 limit of
simplehash.h, as was not being done before.

Essentially, this is removing all the artificial datatype-dependent
limitations on these values from upstream processing, and applying
just one clamp at the moment where we're forced to do so by the
datatype choices of simplehash.h.
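
As a minimal sketch of that single clamp, assuming a hypothetical helper
name and taking UINT32_MAX as the simplehash ceiling (this is not the
committed BuildTupleHashTable code):

    #include <math.h>
    #include <stdint.h>

    /*
     * Hypothetical sketch: squeeze a planner group-count estimate (a
     * double, so never truncated upstream) into the uint32 range that
     * simplehash.h's create function accepts.  The name and exact bounds
     * are illustrative, not the committed PostgreSQL code.
     */
    static uint32_t
    clamp_group_estimate(double nelements)
    {
        if (isnan(nelements) || nelements <= 1.0)
            return 1;               /* need at least one bucket */
        if (nelements >= (double) UINT32_MAX)
            return UINT32_MAX;      /* simplehash sizes are uint32 */
        return (uint32_t) nelements;
    }

With this shape of clamp, even a wildly large estimate such as 1e12 simply
saturates at the uint32 limit instead of overflowing a "long" upstream.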

Also, remove BuildTupleHashTable's misguided attempt to enforce
work_mem/hash_mem_limit.  It doesn't have enough information
(particularly not the expected tuple width) to do that accurately,
and it has no real business second-guessing the caller's choice.
For all these plan types, it's really the planner's responsibility
to not choose a hashed implementation if the hashtable is expected
to exceed hash_mem_limit.  The previous patch improved the
accuracy of those estimates, and even if BuildTupleHashTable had
more information it should arrive at the same conclusions.
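
The planner-side check alluded to here can be sketched as below.
get_hash_memory_limit() is PostgreSQL's accessor for the hash_mem budget;
the helper and its parameters are hypothetical stand-ins for the planner's
estimates:

    #include <stdbool.h>
    #include <stddef.h>

    /* PostgreSQL's accessor for the hash_mem budget, in bytes. */
    extern size_t get_hash_memory_limit(void);

    /*
     * Hypothetical helper: given the planner's estimated per-entry size
     * and group count, decide whether a hashed implementation should be
     * considered at all.  Illustrates the policy described above; not a
     * quotation of the planner code.
     */
    static bool
    hashed_grouping_fits(double hashentrysize, double dNumGroups)
    {
        return hashentrysize * dNumGroups <= (double) get_hash_memory_limit();
    }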

Reported-by: Jeff Janes <jeff.janes@gmail.com>
Author: Tom Lane <tgl@sss.pgh.pa.us>
Reviewed-by: David Rowley <dgrowleyml@gmail.com>
Discussion: https://postgr.es/m/CAMkU=1zia0JfW_QR8L5xA2vpa0oqVuiapm78h=WpNsHH13_9uw@mail.gmail.com
commit 8f29467c57
parent 1ea5bdb00b
Tom Lane, 2025-11-02 16:57:43 -05:00
11 changed files with 55 additions and 95 deletions

src/backend/executor/nodeAgg.c

@@ -402,12 +402,12 @@ static void find_cols(AggState *aggstate, Bitmapset **aggregated,
 						  Bitmapset **unaggregated);
 static bool find_cols_walker(Node *node, FindColsContext *context);
 static void build_hash_tables(AggState *aggstate);
-static void build_hash_table(AggState *aggstate, int setno, long nbuckets);
+static void build_hash_table(AggState *aggstate, int setno, double nbuckets);
 static void hashagg_recompile_expressions(AggState *aggstate, bool minslot,
 										  bool nullcheck);
 static void hash_create_memory(AggState *aggstate);
-static long hash_choose_num_buckets(double hashentrysize,
-									long ngroups, Size memory);
+static double hash_choose_num_buckets(double hashentrysize,
+									  double ngroups, Size memory);
 static int	hash_choose_num_partitions(double input_groups,
 									   double hashentrysize,
 									   int used_bits,
@@ -1469,7 +1469,7 @@ build_hash_tables(AggState *aggstate)
 	for (setno = 0; setno < aggstate->num_hashes; ++setno)
 	{
 		AggStatePerHash perhash = &aggstate->perhash[setno];
-		long		nbuckets;
+		double		nbuckets;
 		Size		memory;
 
 		if (perhash->hashtable != NULL)
@@ -1478,8 +1478,6 @@ build_hash_tables(AggState *aggstate)
 			continue;
 		}
 
-		Assert(perhash->aggnode->numGroups > 0);
-
 		memory = aggstate->hash_mem_limit / aggstate->num_hashes;
 
 		/* choose reasonable number of buckets per hashtable */
@@ -1505,7 +1503,7 @@
  * Build a single hashtable for this grouping set.
  */
 static void
-build_hash_table(AggState *aggstate, int setno, long nbuckets)
+build_hash_table(AggState *aggstate, int setno, double nbuckets)
 {
 	AggStatePerHash perhash = &aggstate->perhash[setno];
 	MemoryContext metacxt = aggstate->hash_metacxt;
@@ -2053,11 +2051,11 @@ hash_create_memory(AggState *aggstate)
 /*
  * Choose a reasonable number of buckets for the initial hash table size.
  */
-static long
-hash_choose_num_buckets(double hashentrysize, long ngroups, Size memory)
+static double
+hash_choose_num_buckets(double hashentrysize, double ngroups, Size memory)
 {
-	long		max_nbuckets;
-	long		nbuckets = ngroups;
+	double		max_nbuckets;
+	double		nbuckets = ngroups;
 
 	max_nbuckets = memory / hashentrysize;
 
@@ -2065,12 +2063,16 @@ hash_choose_num_buckets(double hashentrysize, long ngroups, Size memory)
 	 * Underestimating is better than overestimating.  Too many buckets crowd
 	 * out space for group keys and transition state values.
 	 */
-	max_nbuckets >>= 1;
+	max_nbuckets /= 2;
 
 	if (nbuckets > max_nbuckets)
 		nbuckets = max_nbuckets;
 
-	return Max(nbuckets, 1);
+	/*
+	 * BuildTupleHashTable will clamp any obviously-insane result, so we don't
+	 * need to be too careful here.
+	 */
+	return nbuckets;
 }
 
 /*
@@ -3686,7 +3688,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 	if (use_hashing)
 	{
 		Plan	   *outerplan = outerPlan(node);
-		uint64		totalGroups = 0;
+		double		totalGroups = 0;
 
 		aggstate->hash_spill_rslot = ExecInitExtraTupleSlot(estate, scanDesc,
 															&TTSOpsMinimalTuple);