
pgindent run for 9.4

This includes removing tabs after periods in C comments, which was
applied to back branches, so this change should not affect backpatching.
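
For illustration (a constructed example, not a line from the patch), the comment change is whitespace-only: a tab after a sentence-ending period becomes the project's standard two spaces.

/* Old: a tab follows the period.	Invisible when rendered, visible to diff. */
/* New: two spaces follow the period.  Same text, only the whitespace changes. */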
Bruce Momjian
2014-05-06 12:12:18 -04:00
parent fb85cd4320
commit 0a78320057
854 changed files with 7848 additions and 7368 deletions

src/backend/executor/execMain.c

@@ -19,7 +19,7 @@
  * ExecutorRun accepts direction and count arguments that specify whether
  * the plan is to be executed forwards, backwards, and for how many tuples.
  * In some cases ExecutorRun may be called multiple times to process all
- * the tuples for a plan.	It is also acceptable to stop short of executing
+ * the tuples for a plan.  It is also acceptable to stop short of executing
  * the whole plan (but only if it is a SELECT).
  *
  * ExecutorFinish must be called after the final ExecutorRun call and
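
The calling convention this comment describes can be summarized in a minimal, hypothetical caller; a sketch against the 9.4-era API (ExecutorRun still takes a long count here), where run_plan_to_completion is an illustrative name, not a PostgreSQL function:

#include "postgres.h"
#include "executor/executor.h"

/* Hypothetical helper: drive an already-built QueryDesc through the
 * lifecycle described above.  count = 0 asks ExecutorRun to process
 * every tuple; a nonzero count may stop short, which the comment notes
 * is only acceptable for a SELECT. */
static void
run_plan_to_completion(QueryDesc *queryDesc)
{
	ExecutorStart(queryDesc, 0);	/* no executor flags */
	ExecutorRun(queryDesc, ForwardScanDirection, 0L);
	ExecutorFinish(queryDesc);		/* fire AFTER triggers, etc. */
	ExecutorEnd(queryDesc);
}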
@@ -329,12 +329,12 @@ standard_ExecutorRun(QueryDesc *queryDesc,
  *		ExecutorFinish
  *
  *		This routine must be called after the last ExecutorRun call.
- *		It performs cleanup such as firing AFTER triggers.	It is
+ *		It performs cleanup such as firing AFTER triggers.  It is
  *		separate from ExecutorEnd because EXPLAIN ANALYZE needs to
  *		include these actions in the total runtime.
  *
  *		We provide a function hook variable that lets loadable plugins
- *		get control when ExecutorFinish is called.	Such a plugin would
+ *		get control when ExecutorFinish is called.  Such a plugin would
  *		normally call standard_ExecutorFinish().
  *
  * ----------------------------------------------------------------
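
The hook variable in question is ExecutorFinish_hook. As a minimal sketch, a loadable plugin installs it in _PG_init and chains onward, in the style of contrib modules such as auto_explain (the plugin and its function names below are hypothetical):

#include "postgres.h"
#include "fmgr.h"
#include "executor/executor.h"

PG_MODULE_MAGIC;

static ExecutorFinish_hook_type prev_ExecutorFinish = NULL;

/* Hypothetical plugin callback: do extra work, then chain to the
 * previous hook if one was installed, else to the standard routine. */
static void
plugin_ExecutorFinish(QueryDesc *queryDesc)
{
	/* ... plugin-specific bookkeeping would go here ... */
	if (prev_ExecutorFinish)
		prev_ExecutorFinish(queryDesc);
	else
		standard_ExecutorFinish(queryDesc);
}

void
_PG_init(void)
{
	prev_ExecutorFinish = ExecutorFinish_hook;
	ExecutorFinish_hook = plugin_ExecutorFinish;
}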
@@ -565,7 +565,7 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
 	 * userid to check as: current user unless we have a setuid indication.
 	 *
 	 * Note: GetUserId() is presently fast enough that there's no harm in
-	 * calling it separately for each RTE.	If that stops being true, we could
+	 * calling it separately for each RTE.  If that stops being true, we could
 	 * call it once in ExecCheckRTPerms and pass the userid down from there.
 	 * But for now, no need for the extra clutter.
 	 */
@@ -1184,7 +1184,7 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo,
  * if so it doesn't matter which one we pick.)  However, it is sometimes
  * necessary to fire triggers on other relations; this happens mainly when an
  * RI update trigger queues additional triggers on other relations, which will
- * be processed in the context of the outer query.	For efficiency's sake,
+ * be processed in the context of the outer query.  For efficiency's sake,
  * we want to have a ResultRelInfo for those triggers too; that can avoid
  * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
  * ANALYZE to report the runtimes of such triggers.)  So we make additional
@@ -1221,7 +1221,7 @@ ExecGetTriggerResultRel(EState *estate, Oid relid)
 	/*
 	 * Open the target relation's relcache entry.  We assume that an
 	 * appropriate lock is still held by the backend from whenever the trigger
-	 * event got queued, so we need take no new lock here.	Also, we need not
+	 * event got queued, so we need take no new lock here.  Also, we need not
 	 * recheck the relkind, so no need for CheckValidResultRel.
 	 */
 	rel = heap_open(relid, NoLock);
@@ -1327,7 +1327,7 @@ ExecPostprocessPlan(EState *estate)
 
 	/*
 	 * Run any secondary ModifyTable nodes to completion, in case the main
-	 * query did not fetch all rows from them.	(We do this to ensure that
+	 * query did not fetch all rows from them.  (We do this to ensure that
 	 * such nodes have predictable results.)
 	 */
 	foreach(lc, estate->es_auxmodifytables)
@@ -1639,7 +1639,8 @@ ExecWithCheckOptions(ResultRelInfo *resultRelInfo,
 					 TupleTableSlot *slot, EState *estate)
 {
 	ExprContext *econtext;
-	ListCell   *l1, *l2;
+	ListCell   *l1,
+			   *l2;
 
 	/*
 	 * We will use the EState's per-tuple context for evaluating constraint
@@ -1655,7 +1656,7 @@ ExecWithCheckOptions(ResultRelInfo *resultRelInfo,
 							l2, resultRelInfo->ri_WithCheckOptionExprs)
 	{
 		WithCheckOption *wco = (WithCheckOption *) lfirst(l1);
-		ExprState *wcoExpr = (ExprState *) lfirst(l2);
+		ExprState  *wcoExpr = (ExprState *) lfirst(l2);
 
 		/*
 		 * WITH CHECK OPTION checks are intended to ensure that the new tuple
@@ -1667,8 +1668,8 @@ ExecWithCheckOptions(ResultRelInfo *resultRelInfo,
 		if (!ExecQual((List *) wcoExpr, econtext, false))
 			ereport(ERROR,
 					(errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
-					 errmsg("new row violates WITH CHECK OPTION for view \"%s\"",
-							wco->viewname),
+				 errmsg("new row violates WITH CHECK OPTION for view \"%s\"",
+						wco->viewname),
 					 errdetail("Failing row contains %s.",
 							   ExecBuildSlotValueDescription(slot,
 							RelationGetDescr(resultRelInfo->ri_RelationDesc),
@@ -1681,7 +1682,7 @@ ExecWithCheckOptions(ResultRelInfo *resultRelInfo,
  *
  * This is intentionally very similar to BuildIndexValueDescription, but
  * unlike that function, we truncate long field values (to at most maxfieldlen
- * bytes).	That seems necessary here since heap field values could be very
+ * bytes).  That seems necessary here since heap field values could be very
  * long, whereas index entries typically aren't so wide.
  *
  * Also, unlike the case with index entries, we need to be prepared to ignore
@@ -1875,7 +1876,7 @@ EvalPlanQual(EState *estate, EPQState *epqstate,
 	*tid = copyTuple->t_self;
 
 	/*
-	 * Need to run a recheck subquery.	Initialize or reinitialize EPQ state.
+	 * Need to run a recheck subquery.  Initialize or reinitialize EPQ state.
 	 */
 	EvalPlanQualBegin(epqstate, estate);
@@ -1958,7 +1959,7 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
 
 			/*
 			 * If xmin isn't what we're expecting, the slot must have been
-			 * recycled and reused for an unrelated tuple.	This implies that
+			 * recycled and reused for an unrelated tuple.  This implies that
 			 * the latest version of the row was deleted, so we need do
 			 * nothing.  (Should be safe to examine xmin without getting
 			 * buffer's content lock, since xmin never changes in an existing
@@ -2199,7 +2200,7 @@ EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
 
 /*
  * Fetch the current row values for any non-locked relations that need
- * to be scanned by an EvalPlanQual operation.	origslot must have been set
+ * to be scanned by an EvalPlanQual operation.  origslot must have been set
  * to contain the current result row (top-level row) that we need to recheck.
  */
 void
@@ -2428,7 +2429,7 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
 
 	/*
 	 * Each EState must have its own es_epqScanDone state, but if we have
-	 * nested EPQ checks they should share es_epqTuple arrays.	This allows
+	 * nested EPQ checks they should share es_epqTuple arrays.  This allows
 	 * sub-rechecks to inherit the values being examined by an outer recheck.
 	 */
 	estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
@@ -2485,7 +2486,7 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
  *
  * This is a cut-down version of ExecutorEnd(); basically we want to do most
  * of the normal cleanup, but *not* close result relations (which we are
- * just sharing from the outer query).	We do, however, have to close any
+ * just sharing from the outer query).  We do, however, have to close any
  * trigger target relations that got opened, since those are not shared.
  * (There probably shouldn't be any of the latter, but just in case...)
  */