Mirror of https://github.com/postgres/postgres.git
Run pgindent on 9.2 source tree in preparation for first 9.3
commit-fest.
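For readers unfamiliar with the tool: pgindent is PostgreSQL's source-formatting pass. It re-tabs declarations, normalizes spacing, and re-fills comment blocks to the project's standard width, so the hunks below are almost entirely whitespace and comment-wrapping changes. A minimal sketch of the style it enforces follows; the type, function, and variable names are invented for illustration, and the exact alignment columns are approximate, not taken from this commit.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct ItemList
{
	struct ItemList *next;		/* next cell, or NULL at end of list */
	bool		is_live;		/* does this cell still count? */
} ItemList;

/* Declaration names aligned at a tab stop, pointer stars attached to the
 * name, and comment text re-filled to the project's standard line width. */
static int
count_live_items(ItemList *list)
{
	ItemList   *cur;
	int			nlive = 0;

	for (cur = list; cur != NULL; cur = cur->next)
	{
		if (cur->is_live)
			nlive++;
	}
	return nlive;
}

int
main(void)
{
	ItemList	b = {NULL, true};
	ItemList	a = {&b, false};

	printf("%d\n", count_live_items(&a));
	return 0;
}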
@@ -151,7 +151,7 @@ execCurrentOf(CurrentOfExpr *cexpr,
 {
 	ScanState *scanstate;
 	bool lisnull;
-	Oid tuple_tableoid PG_USED_FOR_ASSERTS_ONLY;
+	Oid tuple_tableoid PG_USED_FOR_ASSERTS_ONLY;
 	ItemPointer tuple_tid;

 	/*
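A note on PG_USED_FOR_ASSERTS_ONLY, which appears in this hunk and several later ones: it marks a variable whose only reference is inside Assert(), so that builds compiled without assertions do not emit unused-variable warnings. A rough standalone sketch of the same idea, using a stand-in macro and invented names rather than the PostgreSQL definitions:

#include <assert.h>
#include <stdio.h>

/* Stand-in for the idea behind PG_USED_FOR_ASSERTS_ONLY: suppress the
 * unused-variable warning when the only reference is inside assert(). */
#if defined(__GNUC__)
#define USED_FOR_ASSERTS_ONLY __attribute__((unused))
#else
#define USED_FOR_ASSERTS_ONLY
#endif

static int
pop_top(int *stack, int *ntop)
{
	int			depth USED_FOR_ASSERTS_ONLY = *ntop;

	assert(depth > 0);			/* sole use of depth when assertions are on */
	return stack[--(*ntop)];
}

int
main(void)
{
	int			stack[] = {1, 2, 3};
	int			ntop = 3;

	printf("%d\n", pop_top(stack, &ntop));
	return 0;
}

Built with assertions disabled (-DNDEBUG), the assert expands to nothing and the attribute keeps the compiler quiet about the otherwise-unreferenced variable.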
@@ -80,7 +80,7 @@ static void ExecutePlan(EState *estate, PlanState *planstate,
 static bool ExecCheckRTEPerms(RangeTblEntry *rte);
 static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
 static char *ExecBuildSlotValueDescription(TupleTableSlot *slot,
-							  int maxfieldlen);
+							  int maxfieldlen);
 static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
 							  Plan *planTree);

@@ -1520,7 +1520,7 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
 			ereport(ERROR,
 					(errcode(ERRCODE_NOT_NULL_VIOLATION),
 					 errmsg("null value in column \"%s\" violates not-null constraint",
-							NameStr(rel->rd_att->attrs[attrChk - 1]->attname)),
+							NameStr(rel->rd_att->attrs[attrChk - 1]->attname)),
 					 errdetail("Failing row contains %s.",
 							   ExecBuildSlotValueDescription(slot, 64))));
 		}
@@ -578,15 +578,15 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
 	/* Get the input slot and attribute number we want */
 	switch (variable->varno)
 	{
-		case INNER_VAR: /* get the tuple from the inner node */
+		case INNER_VAR: /* get the tuple from the inner node */
 			slot = econtext->ecxt_innertuple;
 			break;

-		case OUTER_VAR: /* get the tuple from the outer node */
+		case OUTER_VAR: /* get the tuple from the outer node */
 			slot = econtext->ecxt_outertuple;
 			break;

-			/* INDEX_VAR is handled by default case */
+			/* INDEX_VAR is handled by default case */

 		default: /* get the tuple from the relation being
 				  * scanned */
@@ -763,15 +763,15 @@ ExecEvalScalarVar(ExprState *exprstate, ExprContext *econtext,
 	/* Get the input slot and attribute number we want */
 	switch (variable->varno)
 	{
-		case INNER_VAR: /* get the tuple from the inner node */
+		case INNER_VAR: /* get the tuple from the inner node */
 			slot = econtext->ecxt_innertuple;
 			break;

-		case OUTER_VAR: /* get the tuple from the outer node */
+		case OUTER_VAR: /* get the tuple from the outer node */
 			slot = econtext->ecxt_outertuple;
 			break;

-			/* INDEX_VAR is handled by default case */
+			/* INDEX_VAR is handled by default case */

 		default: /* get the tuple from the relation being
 				  * scanned */
@@ -808,15 +808,15 @@ ExecEvalWholeRowVar(ExprState *exprstate, ExprContext *econtext,
 	/* Get the input slot we want */
 	switch (variable->varno)
 	{
-		case INNER_VAR: /* get the tuple from the inner node */
+		case INNER_VAR: /* get the tuple from the inner node */
 			slot = econtext->ecxt_innertuple;
 			break;

-		case OUTER_VAR: /* get the tuple from the outer node */
+		case OUTER_VAR: /* get the tuple from the outer node */
 			slot = econtext->ecxt_outertuple;
 			break;

-			/* INDEX_VAR is handled by default case */
+			/* INDEX_VAR is handled by default case */

 		default: /* get the tuple from the relation being
 				  * scanned */
@@ -879,15 +879,15 @@ ExecEvalWholeRowSlow(ExprState *exprstate, ExprContext *econtext,
 	/* Get the input slot we want */
 	switch (variable->varno)
 	{
-		case INNER_VAR: /* get the tuple from the inner node */
+		case INNER_VAR: /* get the tuple from the inner node */
 			slot = econtext->ecxt_innertuple;
 			break;

-		case OUTER_VAR: /* get the tuple from the outer node */
+		case OUTER_VAR: /* get the tuple from the outer node */
 			slot = econtext->ecxt_outertuple;
 			break;

-			/* INDEX_VAR is handled by default case */
+			/* INDEX_VAR is handled by default case */

 		default: /* get the tuple from the relation being
 				  * scanned */
@@ -578,7 +578,7 @@ ExecBuildProjectionInfo(List *targetList,
 				projInfo->pi_lastOuterVar = attnum;
 				break;

-				/* INDEX_VAR is handled by default case */
+				/* INDEX_VAR is handled by default case */

 			default:
 				varSlotOffsets[numSimpleVars] = offsetof(ExprContext,
@@ -638,7 +638,7 @@ get_last_attnums(Node *node, ProjectionInfo *projInfo)
 				projInfo->pi_lastOuterVar = attnum;
 				break;

-				/* INDEX_VAR is handled by default case */
+				/* INDEX_VAR is handled by default case */

 			default:
 				if (projInfo->pi_lastScanVar < attnum)
@@ -128,11 +128,11 @@ typedef struct SQLFunctionParseInfo
 /* non-export function prototypes */
 static Node *sql_fn_param_ref(ParseState *pstate, ParamRef *pref);
 static Node *sql_fn_post_column_ref(ParseState *pstate,
-					   ColumnRef *cref, Node *var);
+					   ColumnRef *cref, Node *var);
 static Node *sql_fn_make_param(SQLFunctionParseInfoPtr pinfo,
-				  int paramno, int location);
+				  int paramno, int location);
 static Node *sql_fn_resolve_param_name(SQLFunctionParseInfoPtr pinfo,
-						  const char *paramname, int location);
+						  const char *paramname, int location);
 static List *init_execution_state(List *queryTree_list,
 					  SQLFunctionCachePtr fcache,
 					  bool lazyEvalOK);
@@ -227,13 +227,13 @@ prepare_sql_fn_parse_info(HeapTuple procedureTuple,
 								  Anum_pg_proc_proargnames,
 								  &isNull);
 	if (isNull)
-		proargnames = PointerGetDatum(NULL); /* just to be sure */
+		proargnames = PointerGetDatum(NULL); /* just to be sure */

 	proargmodes = SysCacheGetAttr(PROCNAMEARGSNSP, procedureTuple,
 								  Anum_pg_proc_proargmodes,
 								  &isNull);
 	if (isNull)
-		proargmodes = PointerGetDatum(NULL); /* just to be sure */
+		proargmodes = PointerGetDatum(NULL); /* just to be sure */

 	n_arg_names = get_func_input_arg_names(proargnames, proargmodes,
 										   &pinfo->argnames);
@@ -422,7 +422,7 @@ static Node *
 sql_fn_resolve_param_name(SQLFunctionParseInfoPtr pinfo,
 						  const char *paramname, int location)
 {
-	int i;
+	int i;

 	if (pinfo->argnames == NULL)
 		return NULL;
@@ -66,6 +66,7 @@ BitmapHeapNext(BitmapHeapScanState *node)
 	TIDBitmap *tbm;
 	TBMIterator *tbmiterator;
 	TBMIterateResult *tbmres;

 #ifdef USE_PREFETCH
 	TBMIterator *prefetch_iterator;
 #endif
@@ -355,7 +356,7 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
 		{
 			OffsetNumber offnum = tbmres->offsets[curslot];
 			ItemPointerData tid;
-			HeapTupleData heapTuple;
+			HeapTupleData heapTuple;

 			ItemPointerSet(&tid, page, offnum);
 			if (heap_hot_search_buffer(&tid, scan->rs_rd, buffer, snapshot,
@@ -86,7 +86,7 @@ IndexOnlyNext(IndexOnlyScanState *node)
 		 * Note on Memory Ordering Effects: visibilitymap_test does not lock
 		 * the visibility map buffer, and therefore the result we read here
 		 * could be slightly stale. However, it can't be stale enough to
-		 * matter. It suffices to show that (1) there is a read barrier
+		 * matter. It suffices to show that (1) there is a read barrier
 		 * between the time we read the index TID and the time we test the
 		 * visibility map; and (2) there is a write barrier between the time
 		 * some other concurrent process clears the visibility map bit and the
@@ -106,12 +106,12 @@ IndexOnlyNext(IndexOnlyScanState *node)
 			node->ioss_HeapFetches++;
 			tuple = index_fetch_heap(scandesc);
 			if (tuple == NULL)
-				continue; /* no visible tuple, try next index entry */
+				continue; /* no visible tuple, try next index entry */

 			/*
 			 * Only MVCC snapshots are supported here, so there should be no
 			 * need to keep following the HOT chain once a visible entry has
-			 * been found. If we did want to allow that, we'd need to keep
+			 * been found. If we did want to allow that, we'd need to keep
 			 * more state to remember not to call index_getnext_tid next time.
 			 */
 			if (scandesc->xs_continue_hot)
@@ -120,7 +120,7 @@ IndexOnlyNext(IndexOnlyScanState *node)
 			/*
 			 * Note: at this point we are holding a pin on the heap page, as
 			 * recorded in scandesc->xs_cbuf. We could release that pin now,
-			 * but it's not clear whether it's a win to do so. The next index
+			 * but it's not clear whether it's a win to do so. The next index
 			 * entry might require a visit to the same heap page.
 			 */
 		}
@@ -176,8 +176,8 @@ StoreIndexTuple(TupleTableSlot *slot, IndexTuple itup, TupleDesc itupdesc)
 	 * Note: we must use the tupdesc supplied by the AM in index_getattr, not
 	 * the slot's tupdesc, in case the latter has different datatypes (this
 	 * happens for btree name_ops in particular). They'd better have the same
-	 * number of columns though, as well as being datatype-compatible which
-	 * is something we can't so easily check.
+	 * number of columns though, as well as being datatype-compatible which is
+	 * something we can't so easily check.
 	 */
 	Assert(slot->tts_tupleDescriptor->natts == nindexatts);

@@ -494,10 +494,10 @@ ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags)
 	 * Initialize scan descriptor.
 	 */
 	indexstate->ioss_ScanDesc = index_beginscan(currentRelation,
-								indexstate->ioss_RelationDesc,
-								estate->es_snapshot,
-								indexstate->ioss_NumScanKeys,
-								indexstate->ioss_NumOrderByKeys);
+								indexstate->ioss_RelationDesc,
+								estate->es_snapshot,
+								indexstate->ioss_NumScanKeys,
+								indexstate->ioss_NumOrderByKeys);

 	/* Set it up for index-only scan */
 	indexstate->ioss_ScanDesc->xs_want_itup = true;
@@ -66,7 +66,7 @@ ExecMaterial(MaterialState *node)
 		 * Allocate a second read pointer to serve as the mark. We know it
 		 * must have index 1, so needn't store that.
 		 */
-		int ptrno PG_USED_FOR_ASSERTS_ONLY;
+		int ptrno PG_USED_FOR_ASSERTS_ONLY;

 		ptrno = tuplestore_alloc_read_pointer(tuplestorestate,
 											  node->eflags);
@@ -130,7 +130,7 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags)

 	for (i = 0; i < node->numCols; i++)
 	{
-		SortSupport sortKey = mergestate->ms_sortkeys + i;
+		SortSupport sortKey = mergestate->ms_sortkeys + i;

 		sortKey->ssup_cxt = CurrentMemoryContext;
 		sortKey->ssup_collation = node->collations[i];
@@ -276,7 +276,7 @@ heap_compare_slots(MergeAppendState *node, SlotNumber slot1, SlotNumber slot2)

 	for (nkey = 0; nkey < node->ms_nkeys; nkey++)
 	{
-		SortSupport sortKey = node->ms_sortkeys + nkey;
+		SortSupport sortKey = node->ms_sortkeys + nkey;
 		AttrNumber attno = sortKey->ssup_attno;
 		Datum datum1,
 			datum2;
@@ -247,7 +247,7 @@ MJExamineQuals(List *mergeclauses,
 									op_lefttype,
 									op_righttype,
 									BTORDER_PROC);
-		if (!OidIsValid(sortfunc)) /* should not happen */
+		if (!OidIsValid(sortfunc)) /* should not happen */
 			elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
 				 BTORDER_PROC, op_lefttype, op_righttype, opfamily);
 		/* We'll use a shim to call the old-style btree comparator */
@@ -405,7 +405,7 @@ MJCompare(MergeJoinState *mergestate)
 		 */
 		if (clause->lisnull && clause->risnull)
 		{
-			nulleqnull = true; /* NULL "=" NULL */
+			nulleqnull = true; /* NULL "=" NULL */
 			continue;
 		}

@@ -419,8 +419,8 @@ MJCompare(MergeJoinState *mergestate)

 	/*
 	 * If we had any NULL-vs-NULL inputs, we do not want to report that the
-	 * tuples are equal. Instead, if result is still 0, change it to +1.
-	 * This will result in advancing the inner side of the join.
+	 * tuples are equal. Instead, if result is still 0, change it to +1. This
+	 * will result in advancing the inner side of the join.
 	 *
 	 * Likewise, if there was a constant-false joinqual, do not report
 	 * equality. We have to check this as part of the mergequals, else the
@@ -950,8 +950,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 	 * If there are indices on the result relation, open them and save
 	 * descriptors in the result relation info, so that we can add new
 	 * index entries for the tuples we add/update. We need not do this
-	 * for a DELETE, however, since deletion doesn't affect indexes.
-	 * Also, inside an EvalPlanQual operation, the indexes might be open
+	 * for a DELETE, however, since deletion doesn't affect indexes. Also,
+	 * inside an EvalPlanQual operation, the indexes might be open
 	 * already, since we share the resultrel state with the original
 	 * query.
 	 */
@@ -344,7 +344,7 @@ setop_fill_hash_table(SetOpState *setopstate)
 	SetOp *node = (SetOp *) setopstate->ps.plan;
 	PlanState *outerPlan;
 	int firstFlag;
-	bool in_first_rel PG_USED_FOR_ASSERTS_ONLY;
+	bool in_first_rel PG_USED_FOR_ASSERTS_ONLY;

 	/*
 	 * get state info from node
@@ -1674,8 +1674,8 @@ _SPI_prepare_plan(const char *src, SPIPlanPtr plan, ParamListInfo boundParams)
 	raw_parsetree_list = pg_parse_query(src);

 	/*
-	 * Do parse analysis and rule rewrite for each raw parsetree, storing
-	 * the results into unsaved plancache entries.
+	 * Do parse analysis and rule rewrite for each raw parsetree, storing the
+	 * results into unsaved plancache entries.
 	 */
 	plancache_list = NIL;

@@ -1686,8 +1686,8 @@ _SPI_prepare_plan(const char *src, SPIPlanPtr plan, ParamListInfo boundParams)
 		CachedPlanSource *plansource;

 		/*
-		 * Create the CachedPlanSource before we do parse analysis, since
-		 * it needs to see the unmodified raw parse tree.
+		 * Create the CachedPlanSource before we do parse analysis, since it
+		 * needs to see the unmodified raw parse tree.
 		 */
 		plansource = CreateCachedPlan(parsetree,
 									  src,
@@ -1722,7 +1722,7 @@ _SPI_prepare_plan(const char *src, SPIPlanPtr plan, ParamListInfo boundParams)
 									  plan->parserSetup,
 									  plan->parserSetupArg,
 									  cursor_options,
-									  false); /* not fixed result */
+									  false); /* not fixed result */

 		plancache_list = lappend(plancache_list, plansource);
 	}
@@ -1907,7 +1907,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
 		}
 		else
 		{
-			char completionTag[COMPLETION_TAG_BUFSIZE];
+			char completionTag[COMPLETION_TAG_BUFSIZE];

 			ProcessUtility(stmt,
 						   plansource->query_string,
@@ -2335,9 +2335,9 @@ _SPI_make_plan_non_temp(SPIPlanPtr plan)

 	/*
 	 * Reparent all the CachedPlanSources into the procedure context. In
-	 * theory this could fail partway through due to the pallocs, but we
-	 * don't care too much since both the procedure context and the executor
-	 * context would go away on error.
+	 * theory this could fail partway through due to the pallocs, but we don't
+	 * care too much since both the procedure context and the executor context
+	 * would go away on error.
 	 */
 	foreach(lc, plan->plancache_list)
 	{