
Use FLEXIBLE_ARRAY_MEMBER for HeapTupleHeaderData.t_bits[].

This requires changing quite a few places that were depending on
sizeof(HeapTupleHeaderData), but it seems for the best.

Michael Paquier, some adjustments by me
Tom Lane
2015-02-21 15:13:06 -05:00
parent 3d9b6f31ee
commit e1a11d9311
19 changed files with 102 additions and 99 deletions
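For context, a minimal standalone sketch (not the real htup_details.h declarations; the Demo* names are placeholders) of what the change implies: once t_bits[] is declared with FLEXIBLE_ARRAY_MEMBER, sizeof() on the struct no longer measures the fixed-size header portion, so call sites switch to the offsetof()-based SizeofHeapTupleHeader / SizeofMinimalTupleHeader macros, usually wrapped in MAXALIGN().

#include <stddef.h>
#include <stdio.h>

/* With a C99 compiler, PostgreSQL's configure defines FLEXIBLE_ARRAY_MEMBER
 * to expand to nothing, giving a true flexible array member. */
#define FLEXIBLE_ARRAY_MEMBER	/* empty */

/* Simplified stand-in for HeapTupleHeaderData (not the real layout). */
typedef struct DemoTupleHeaderData
{
	unsigned short	t_infomask2;
	unsigned short	t_infomask;
	unsigned char	t_hoff;
	unsigned char	t_bits[FLEXIBLE_ARRAY_MEMBER];	/* variable-length null bitmap */
} DemoTupleHeaderData;

/* The fixed-size part of the header: measure up to t_bits with offsetof(),
 * the same idea as SizeofHeapTupleHeader in htup_details.h. */
#define SizeofDemoTupleHeader	offsetof(DemoTupleHeaderData, t_bits)

int
main(void)
{
	/* sizeof() includes trailing padding and, before this commit, a one-byte
	 * placeholder array; offsetof() gives just the fixed part. */
	printf("sizeof = %zu, offsetof = %zu\n",
		   sizeof(DemoTupleHeaderData), SizeofDemoTupleHeader);
	return 0;
}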


@@ -4036,11 +4036,11 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
 	/*
 	 * If we have a whole-row reference, estimate its width as the sum of
-	 * per-column widths plus sizeof(HeapTupleHeaderData).
+	 * per-column widths plus heap tuple header overhead.
 	 */
 	if (have_wholerow_var)
 	{
-		int32		wholerow_width = sizeof(HeapTupleHeaderData);
+		int32		wholerow_width = MAXALIGN(SizeofHeapTupleHeader);
 		if (reloid != InvalidOid)
 		{
@@ -4078,7 +4078,7 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
 static double
 relation_byte_size(double tuples, int width)
 {
-	return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
+	return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
 }
 /*

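As a rough illustration of what relation_byte_size() now computes, here is a hedged sketch assuming a typical 64-bit build where MAXALIGN rounds up to a multiple of 8 and the fixed heap tuple header is 23 bytes; the Demo* names and the example inputs are placeholders, not planner code.

#include <stdio.h>

/* Illustrative constants for a typical 64-bit build; the real values come
 * from pg_config.h and htup_details.h. */
#define DEMO_MAXIMUM_ALIGNOF	8
#define demo_maxalign(len)	(((len) + DEMO_MAXIMUM_ALIGNOF - 1) & ~(DEMO_MAXIMUM_ALIGNOF - 1))
#define DEMO_SIZEOF_HEAPTUPLEHEADER	23	/* offsetof(HeapTupleHeaderData, t_bits) */

/* Same shape as the planner's relation_byte_size(): per-tuple data width,
 * MAXALIGN'ed, plus the MAXALIGN'ed fixed tuple header. */
static double
demo_relation_byte_size(double tuples, int width)
{
	return tuples * (demo_maxalign(width) + demo_maxalign(DEMO_SIZEOF_HEAPTUPLEHEADER));
}

int
main(void)
{
	/* e.g. 1000 tuples of estimated width 100: MAXALIGN(100) = 104,
	 * MAXALIGN(23) = 24, so 1000 * 128 = 128000 bytes. */
	printf("%.0f\n", demo_relation_byte_size(1000, 100));
	return 0;
}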

@@ -2755,7 +2755,7 @@ choose_hashed_grouping(PlannerInfo *root,
 	 */
 	/* Estimate per-hash-entry space at tuple width... */
-	hashentrysize = MAXALIGN(path_width) + MAXALIGN(sizeof(MinimalTupleData));
+	hashentrysize = MAXALIGN(path_width) + MAXALIGN(SizeofMinimalTupleHeader);
 	/* plus space for pass-by-ref transition values... */
 	hashentrysize += agg_costs->transitionSpace;
 	/* plus the per-hash-entry overhead */
@@ -2923,7 +2923,7 @@ choose_hashed_distinct(PlannerInfo *root,
 	 */
 	/* Estimate per-hash-entry space at tuple width... */
-	hashentrysize = MAXALIGN(path_width) + MAXALIGN(sizeof(MinimalTupleData));
+	hashentrysize = MAXALIGN(path_width) + MAXALIGN(SizeofMinimalTupleHeader);
 	/* plus the per-hash-entry overhead */
 	hashentrysize += hash_agg_entry_size(0);

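A hedged sketch of the per-hash-entry estimate these two call sites build up; the minimal tuple header size, the per-entry overhead, and the input widths below are made-up placeholders, not what the real macros or hash_agg_entry_size() return.

#include <stddef.h>
#include <stdio.h>

#define DEMO_MAXALIGN(len)	(((len) + 7) & ~((size_t) 7))	/* 8-byte MAXALIGN */
#define DEMO_SIZEOF_MINIMALTUPLEHEADER	12	/* placeholder for offsetof(MinimalTupleData, t_bits) */

int
main(void)
{
	int		path_width = 100;			/* planner's estimated row width */
	size_t	transition_space = 0;		/* pass-by-ref aggregate state, if any */
	size_t	per_entry_overhead = 56;	/* placeholder for hash_agg_entry_size() */
	size_t	hashentrysize;

	/* Same accumulation as choose_hashed_grouping(): tuple width, then
	 * minimal tuple header, then transition values, then hashtable overhead. */
	hashentrysize = DEMO_MAXALIGN(path_width) + DEMO_MAXALIGN(DEMO_SIZEOF_MINIMALTUPLEHEADER);
	hashentrysize += transition_space;
	hashentrysize += per_entry_overhead;

	printf("estimated bytes per hash entry: %zu\n", hashentrysize);
	return 0;
}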

@@ -974,12 +974,12 @@ subplan_is_hashable(Plan *plan)
 	/*
 	 * The estimated size of the subquery result must fit in work_mem. (Note:
-	 * we use sizeof(HeapTupleHeaderData) here even though the tuples will
-	 * actually be stored as MinimalTuples; this provides some fudge factor
-	 * for hashtable overhead.)
+	 * we use heap tuple overhead here even though the tuples will actually be
+	 * stored as MinimalTuples; this provides some fudge factor for hashtable
+	 * overhead.)
 	 */
 	subquery_size = plan->plan_rows *
-		(MAXALIGN(plan->plan_width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
+		(MAXALIGN(plan->plan_width) + MAXALIGN(SizeofHeapTupleHeader));
 	if (subquery_size > work_mem * 1024L)
 		return false;

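A short hedged sketch of the check made here: estimate the hashed result size and refuse if it exceeds work_mem, which is a kilobyte value, hence the * 1024L. The row count, width, and work_mem setting below are arbitrary examples.

#include <stdio.h>
#include <stdbool.h>

#define DEMO_MAXALIGN(len)	(((len) + 7) & ~7L)
#define DEMO_SIZEOF_HEAPTUPLEHEADER	23

int
main(void)
{
	long	work_mem = 4096;	/* in kB, as in the GUC */
	double	plan_rows = 50000;	/* planner row estimate */
	int		plan_width = 64;	/* planner width estimate */
	double	subquery_size;

	/* Same shape as the check in subplan_is_hashable(). */
	subquery_size = plan_rows *
		(DEMO_MAXALIGN(plan_width) + DEMO_MAXALIGN(DEMO_SIZEOF_HEAPTUPLEHEADER));

	bool	hashable = !(subquery_size > work_mem * 1024L);

	/* 50000 * (64 + 24) = 4,400,000 bytes vs 4096 kB = 4,194,304 bytes: too big */
	printf("estimated %.0f bytes, %s\n", subquery_size,
		   hashable ? "fits in work_mem" : "does not fit in work_mem");
	return 0;
}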

@@ -832,7 +832,7 @@ choose_hashed_setop(PlannerInfo *root, List *groupClauses,
 	 * Don't do it if it doesn't look like the hashtable will fit into
 	 * work_mem.
 	 */
-	hashentrysize = MAXALIGN(input_plan->plan_width) + MAXALIGN(sizeof(MinimalTupleData));
+	hashentrysize = MAXALIGN(input_plan->plan_width) + MAXALIGN(SizeofMinimalTupleHeader);
 	if (hashentrysize * dNumGroups > work_mem * 1024L)
 		return false;

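The setop variant applies the same gate per estimated group. As a rough hedged example with placeholder numbers (nothing here is hard-coded in the planner), the inequality can also be read as a cap on how many groups the work_mem budget supports.

#include <stdio.h>

int
main(void)
{
	long	work_mem = 4096;		/* kB, as in the GUC */
	double	hashentrysize = 88;		/* placeholder per-entry estimate in bytes */
	double	dNumGroups = 100000;	/* estimated number of distinct groups */

	/* Same inequality as choose_hashed_setop(): hashing is rejected when the
	 * estimated table would not fit in work_mem. */
	if (hashentrysize * dNumGroups > work_mem * 1024L)
		printf("hashed setop rejected (needs %.0f bytes, budget %ld)\n",
			   hashentrysize * dNumGroups, work_mem * 1024L);

	/* Equivalently, this budget supports at most this many groups: */
	printf("max groups for this entry size: %.0f\n",
		   work_mem * 1024L / hashentrysize);
	return 0;
}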

@@ -508,7 +508,7 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
 		int32		tuple_width;
 		tuple_width = get_rel_data_width(rel, attr_widths);
-		tuple_width += sizeof(HeapTupleHeaderData);
+		tuple_width += MAXALIGN(SizeofHeapTupleHeader);
 		tuple_width += sizeof(ItemIdData);
 		/* note: integer division is intentional here */
 		density = (BLCKSZ - SizeOfPageHeaderData) / tuple_width;
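A worked example of the tuples-per-page density estimate at the end of this hunk, assuming the default 8 kB block size and the usual 24-byte page header; the per-row data width is an arbitrary example and the Demo* constants stand in for the real macros.

#include <stdio.h>

#define DEMO_BLCKSZ					8192	/* default PostgreSQL block size */
#define DEMO_SIZE_OF_PAGE_HEADER	24		/* SizeOfPageHeaderData on typical builds */
#define DEMO_MAXALIGN(len)	(((len) + 7) & ~7)
#define DEMO_SIZEOF_HEAPTUPLEHEADER	23
#define DEMO_SIZEOF_ITEMID			4		/* sizeof(ItemIdData): one line pointer */

int
main(void)
{
	int		data_width = 100;	/* per-row data width, as from get_rel_data_width() */
	int		tuple_width;
	int		density;

	tuple_width = data_width;
	tuple_width += DEMO_MAXALIGN(DEMO_SIZEOF_HEAPTUPLEHEADER);	/* 24 */
	tuple_width += DEMO_SIZEOF_ITEMID;							/* 4 */

	/* integer division is intentional, as in estimate_rel_size():
	 * (8192 - 24) / 128 = 63 tuples per page */
	density = (DEMO_BLCKSZ - DEMO_SIZE_OF_PAGE_HEADER) / tuple_width;
	printf("estimated tuples per page: %d\n", density);
	return 0;
}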