
Manual cleanup of pgindent results.

Fix some places where pgindent did silly stuff, often because project
style wasn't followed to begin with.  (I've not touched the atomics
headers, though.)
Author: Tom Lane
Date:   2015-05-24 15:04:10 -04:00
Commit: 2aa0476dc3 (parent 17b48a1a9f)

10 changed files with 68 additions and 87 deletions


@@ -645,10 +645,12 @@ get_last_attnums(Node *node, ProjectionInfo *projInfo)
 	 * overall targetlist's econtext. GroupingFunc arguments are never
 	 * evaluated at all.
 	 */
-	if (IsA(node, Aggref) ||IsA(node, GroupingFunc))
+	if (IsA(node, Aggref))
 		return false;
 	if (IsA(node, WindowFunc))
 		return false;
+	if (IsA(node, GroupingFunc))
+		return false;
 	return expression_tree_walker(node, get_last_attnums,
 								  (void *) projInfo);
 }
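
The shape of the fix is the project's usual tree-walker idiom: one early-return test per node type whose arguments are evaluated elsewhere, then recurse into everything else. Below is a minimal self-contained sketch of that idiom; the Node layout, tag names, and count_vars are invented for illustration and are not PostgreSQL's API (the real walker is expression_tree_walker).

#include <stdio.h>

/* Toy tags standing in for OpExpr, Aggref, WindowFunc, GroupingFunc, Var. */
typedef enum { T_OpExpr, T_Agg, T_WinFunc, T_Grouping, T_Var } NodeTag;

typedef struct Node
{
	NodeTag		tag;
	struct Node *left;			/* child links; NULL for leaves */
	struct Node *right;
} Node;

/*
 * Count T_Var leaves, but do not descend into node types whose arguments
 * are evaluated in some other context -- one test per type, as above.
 */
static void
count_vars(const Node *node, int *nvars)
{
	if (node == NULL)
		return;
	if (node->tag == T_Agg)
		return;					/* arguments evaluated elsewhere */
	if (node->tag == T_WinFunc)
		return;					/* likewise */
	if (node->tag == T_Grouping)
		return;					/* arguments never evaluated at all */
	if (node->tag == T_Var)
		(*nvars)++;
	count_vars(node->left, nvars);
	count_vars(node->right, nvars);
}

int
main(void)
{
	Node		v1 = {T_Var, NULL, NULL};
	Node		v2 = {T_Var, NULL, NULL};
	Node		agg = {T_Agg, &v2, NULL};
	Node		root = {T_OpExpr, &v1, &agg};
	int			nvars = 0;

	count_vars(&root, &nvars);
	printf("vars reachable: %d\n", nvars);	/* prints 1: v2 hides under agg */
	return 0;
}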


@@ -1519,8 +1519,9 @@ agg_retrieve_direct(AggState *aggstate)
 	/*
 	 * get state info from node
 	 *
-	 * econtext is the per-output-tuple expression context tmpcontext is the
-	 * per-input-tuple expression context
+	 * econtext is the per-output-tuple expression context
+	 *
+	 * tmpcontext is the per-input-tuple expression context
 	 */
 	econtext = aggstate->ss.ps.ps_ExprContext;
 	tmpcontext = aggstate->tmpcontext;
@@ -1609,7 +1610,7 @@ agg_retrieve_direct(AggState *aggstate)
 		else
 			nextSetSize = 0;
 
-		/*-
+		/*----------
 		 * If a subgroup for the current grouping set is present, project it.
 		 *
 		 * We have a new group if:
@@ -1624,6 +1625,7 @@ agg_retrieve_direct(AggState *aggstate)
 		 *	  AND
 		 *	  - the previous and pending rows differ on the grouping columns
 		 *		of the next grouping set
+		 *----------
 		 */
 		if (aggstate->input_done ||
			(node->aggstrategy == AGG_SORTED &&
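
The /*---------- opener is not decoration: pgindent leaves a comment alone when it begins with a dash after the /*, so hand-built layout like the condition list above survives reindent runs, whereas an ordinary /* comment gets reflowed into one wrapped paragraph. A sketch of the convention (the comment's contents are invented for illustration):

	/*----------
	 * Emit a result row when:
	 *	 - the current group is complete,
	 *	 AND
	 *	 - its grouping columns changed
	 * (this layout is preserved verbatim by pgindent)
	 *----------
	 */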


@@ -527,8 +527,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
 	 * Buckets are simple pointers to hashjoin tuples, while tupsize
 	 * includes the pointer, hash code, and MinimalTupleData. So buckets
 	 * should never really exceed 25% of work_mem (even for
-	 * NTUP_PER_BUCKET=1); except maybe * for work_mem values that are not
-	 * 2^N bytes, where we might get more * because of doubling. So let's
+	 * NTUP_PER_BUCKET=1); except maybe for work_mem values that are not
+	 * 2^N bytes, where we might get more because of doubling. So let's
 	 * look for 50% here.
 	 */
 	Assert(bucket_bytes <= hash_table_bytes / 2);
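
Rough numbers behind that Assert, as a sketch. The sizes below are assumptions for illustration only (64-bit pointers and a ~40-byte stored tuple), not values taken from the source:

#include <stdio.h>

int
main(void)
{
	/* Assumed, illustrative sizes: an 8-byte bucket pointer per tuple and
	 * a ~40-byte stored tuple (pointer + hash code + MinimalTupleData). */
	const double bucket_ptr = 8.0;
	const double tuple = 40.0;

	/* With NTUP_PER_BUCKET = 1 there is one bucket pointer per tuple, so
	 * the bucket array is a modest slice of the whole hash table... */
	double		frac = bucket_ptr / (bucket_ptr + tuple);

	printf("bucket array share: %.0f%%\n", frac * 100.0);	/* ~17%, under 25% */

	/* ...but rounding the bucket count up to the next power of two can
	 * nearly double that share, hence the Assert's 50% ceiling. */
	printf("after doubling: %.0f%%\n", 2.0 * frac * 100.0);	/* ~33% */
	return 0;
}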
@@ -691,9 +691,9 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
 			if (batchno == curbatch)
 			{
 				/* keep tuple in memory - copy it into the new chunk */
-				HashJoinTuple copyTuple =
-					(HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
+				HashJoinTuple copyTuple;
+				copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
 				memcpy(copyTuple, hashTuple, hashTupleSize);
 				/* and add it back to the appropriate bucket */
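
The split above is the general recipe for initializers that cannot fit on one line: declare first, then assign, so pgindent has no multi-line initializer to mis-wrap. A self-contained sketch of the same pattern; Blob, chunk_alloc, and copy_blob are invented stand-ins, not the dense_alloc API:

#include <stdlib.h>
#include <string.h>

typedef struct Blob
{
	size_t		len;
	char		data[];			/* flexible array member */
} Blob;

/* Invented stand-in for an arena-style allocator like dense_alloc(). */
static void *
chunk_alloc(size_t size)
{
	return malloc(size);
}

static Blob *
copy_blob(const Blob *src)
{
	size_t		size = sizeof(Blob) + src->len;
	Blob	   *copy;			/* declare first... */

	copy = (Blob *) chunk_alloc(size);	/* ...assign separately */
	if (copy != NULL)
		memcpy(copy, src, size);
	return copy;
}

int
main(void)
{
	Blob	   *orig;
	Blob	   *copy;

	orig = (Blob *) malloc(sizeof(Blob) + 5);
	if (orig == NULL)
		return 1;
	orig->len = 5;
	memcpy(orig->data, "hello", 5);

	copy = copy_blob(orig);
	free(copy);
	free(orig);
	return 0;
}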