
commit a6fd7b7a5f
parent 8a94332478
Author: Bruce Momjian
Date:   2017-05-17 16:31:56 -04:00

    Post-PG 10 beta1 pgindent run

    perltidy run not included.

310 changed files with 3338 additions and 3171 deletions
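
The hunks below are typical pgindent output: variable names in local declaration blocks are aligned into a common column, a blank line is enforced after declarations, block comments are re-wrapped to the project's line width, and spacing around comments is normalized. Because most changed lines differ from their predecessors only in whitespace, many old/new line pairs below read identically once indentation is collapsed. A minimal sketch of the declaration style pgindent enforces (hypothetical code, not from this commit; the function and file name are illustrative only):

#include <stdbool.h>

/*
 * Hypothetical example: pgindent aligns the names in a declaration
 * block to a tab stop and inserts a blank line after the block.
 */
static bool
has_dot(const char *name)
{
	const char *cp = name;		/* names aligned into one column */
	bool		found = false;	/* as in the hunks below */

	/* blank line above: pgindent adds one after declarations */
	while (*cp != '\0')
		found |= (*cp++ == '.');
	return found;
}

int
main(void)
{
	return has_dot("execMain.c") ? 0 : 1;
}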

src/backend/executor/execAmi.c

@@ -413,12 +413,13 @@ ExecSupportsMarkRestore(Path *pathnode)
return true;
case T_CustomScan:
- {
- CustomPath *customPath = castNode(CustomPath, pathnode);
- if (customPath->flags & CUSTOMPATH_SUPPORT_MARK_RESTORE)
- return true;
- return false;
- }
+ {
+ CustomPath *customPath = castNode(CustomPath, pathnode);
+
+ if (customPath->flags & CUSTOMPATH_SUPPORT_MARK_RESTORE)
+ return true;
+ return false;
+ }
case T_Result:
/*

src/backend/executor/execGrouping.c

@@ -380,7 +380,7 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
hashtable->in_hash_funcs = hashtable->tab_hash_funcs;
hashtable->cur_eq_funcs = hashtable->tab_eq_funcs;
- key = NULL; /* flag to reference inputslot */
+ key = NULL; /* flag to reference inputslot */
if (isnew)
{

src/backend/executor/execMain.c

@@ -868,7 +868,7 @@ InitPlan(QueryDesc *queryDesc, int eflags)
estate->es_num_root_result_relations = 0;
if (plannedstmt->nonleafResultRelations)
{
- int num_roots = list_length(plannedstmt->rootResultRelations);
+ int num_roots = list_length(plannedstmt->rootResultRelations);
/*
* Firstly, build ResultRelInfos for all the partitioned table
@@ -876,7 +876,7 @@ InitPlan(QueryDesc *queryDesc, int eflags)
* triggers, if any.
*/
resultRelInfos = (ResultRelInfo *)
- palloc(num_roots * sizeof(ResultRelInfo));
+ palloc(num_roots * sizeof(ResultRelInfo));
resultRelInfo = resultRelInfos;
foreach(l, plannedstmt->rootResultRelations)
{
@@ -900,7 +900,7 @@ InitPlan(QueryDesc *queryDesc, int eflags)
/* Simply lock the rest of them. */
foreach(l, plannedstmt->nonleafResultRelations)
{
- Index resultRelIndex = lfirst_int(l);
+ Index resultRelIndex = lfirst_int(l);
/* We locked the roots above. */
if (!list_member_int(plannedstmt->rootResultRelations,
@@ -1919,13 +1919,13 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
if (resultRelInfo->ri_PartitionRoot)
{
HeapTuple tuple = ExecFetchSlotTuple(slot);
- TupleConversionMap *map;
+ TupleConversionMap *map;
rel = resultRelInfo->ri_PartitionRoot;
tupdesc = RelationGetDescr(rel);
/* a reverse map */
map = convert_tuples_by_name(orig_tupdesc, tupdesc,
- gettext_noop("could not convert row type"));
+ gettext_noop("could not convert row type"));
if (map != NULL)
{
tuple = do_convert_tuple(tuple, map);
@@ -1966,13 +1966,13 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
{
HeapTuple tuple = ExecFetchSlotTuple(slot);
TupleDesc old_tupdesc = RelationGetDescr(rel);
- TupleConversionMap *map;
+ TupleConversionMap *map;
rel = resultRelInfo->ri_PartitionRoot;
tupdesc = RelationGetDescr(rel);
/* a reverse map */
map = convert_tuples_by_name(old_tupdesc, tupdesc,
- gettext_noop("could not convert row type"));
+ gettext_noop("could not convert row type"));
if (map != NULL)
{
tuple = do_convert_tuple(tuple, map);
@@ -2008,13 +2008,13 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
{
HeapTuple tuple = ExecFetchSlotTuple(slot);
TupleDesc old_tupdesc = RelationGetDescr(rel);
- TupleConversionMap *map;
+ TupleConversionMap *map;
rel = resultRelInfo->ri_PartitionRoot;
tupdesc = RelationGetDescr(rel);
/* a reverse map */
map = convert_tuples_by_name(old_tupdesc, tupdesc,
- gettext_noop("could not convert row type"));
+ gettext_noop("could not convert row type"));
if (map != NULL)
{
tuple = do_convert_tuple(tuple, map);
@@ -3340,7 +3340,7 @@ ExecFindPartition(ResultRelInfo *resultRelInfo, PartitionDispatch *pd,
(errcode(ERRCODE_CHECK_VIOLATION),
errmsg("no partition of relation \"%s\" found for row",
RelationGetRelationName(failed_rel)),
- val_desc ? errdetail("Partition key of the failing row contains %s.", val_desc) : 0));
+ val_desc ? errdetail("Partition key of the failing row contains %s.", val_desc) : 0));
}
return result;
@@ -3359,8 +3359,8 @@ ExecBuildSlotPartitionKeyDescription(Relation rel,
bool *isnull,
int maxfieldlen)
{
- StringInfoData buf;
- PartitionKey key = RelationGetPartitionKey(rel);
+ StringInfoData buf;
+ PartitionKey key = RelationGetPartitionKey(rel);
int partnatts = get_partition_natts(key);
int i;
Oid relid = RelationGetRelid(rel);

src/backend/executor/execParallel.c

@@ -608,9 +608,9 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,
/*
* Also store the per-worker detail.
*
- * Worker instrumentation should be allocated in the same context as
- * the regular instrumentation information, which is the per-query
- * context. Switch into per-query memory context.
+ * Worker instrumentation should be allocated in the same context as the
+ * regular instrumentation information, which is the per-query context.
+ * Switch into per-query memory context.
*/
oldcontext = MemoryContextSwitchTo(planstate->state->es_query_cxt);
ibytes = mul_size(instrumentation->num_workers, sizeof(Instrumentation));

src/backend/executor/execProcnode.c

@@ -259,7 +259,7 @@ ExecInitNode(Plan *node, EState *estate, int eflags)
case T_NamedTuplestoreScan:
result = (PlanState *) ExecInitNamedTuplestoreScan((NamedTuplestoreScan *) node,
- estate, eflags);
+ estate, eflags);
break;
case T_WorkTableScan:

src/backend/executor/execReplication.c

@@ -116,15 +116,15 @@ RelationFindReplTupleByIndex(Relation rel, Oid idxoid,
TupleTableSlot *searchslot,
TupleTableSlot *outslot)
{
- HeapTuple scantuple;
- ScanKeyData skey[INDEX_MAX_KEYS];
- IndexScanDesc scan;
- SnapshotData snap;
- TransactionId xwait;
- Relation idxrel;
- bool found;
+ HeapTuple scantuple;
+ ScanKeyData skey[INDEX_MAX_KEYS];
+ IndexScanDesc scan;
+ SnapshotData snap;
+ TransactionId xwait;
+ Relation idxrel;
+ bool found;
- /* Open the index.*/
+ /* Open the index. */
idxrel = index_open(idxoid, RowExclusiveLock);
/* Start an index scan. */
@@ -152,8 +152,8 @@ retry:
snap.xmin : snap.xmax;
/*
- * If the tuple is locked, wait for locking transaction to finish
- * and retry.
+ * If the tuple is locked, wait for locking transaction to finish and
+ * retry.
*/
if (TransactionIdIsValid(xwait))
{
@@ -165,7 +165,7 @@ retry:
/* Found tuple, try to lock it in the lockmode. */
if (found)
{
- Buffer buf;
+ Buffer buf;
HeapUpdateFailureData hufd;
HTSU_Result res;
HeapTupleData locktup;
@@ -177,7 +177,7 @@ retry:
res = heap_lock_tuple(rel, &locktup, GetCurrentCommandId(false),
lockmode,
LockWaitBlock,
- false /* don't follow updates */,
+ false /* don't follow updates */ ,
&buf, &hufd);
/* the tuple slot already has the buffer pinned */
ReleaseBuffer(buf);
@@ -219,7 +219,7 @@ retry:
* to use.
*/
static bool
- tuple_equals_slot(TupleDesc desc, HeapTuple tup, TupleTableSlot *slot)
+ tuple_equals_slot(TupleDesc desc, HeapTuple tup, TupleTableSlot *slot)
{
Datum values[MaxTupleAttributeNumber];
bool isnull[MaxTupleAttributeNumber];
@@ -267,12 +267,12 @@ bool
RelationFindReplTupleSeq(Relation rel, LockTupleMode lockmode,
TupleTableSlot *searchslot, TupleTableSlot *outslot)
{
- HeapTuple scantuple;
- HeapScanDesc scan;
- SnapshotData snap;
- TransactionId xwait;
- bool found;
- TupleDesc desc = RelationGetDescr(rel);
+ HeapTuple scantuple;
+ HeapScanDesc scan;
+ SnapshotData snap;
+ TransactionId xwait;
+ bool found;
+ TupleDesc desc = RelationGetDescr(rel);
Assert(equalTupleDescs(desc, outslot->tts_tupleDescriptor));
@@ -299,8 +299,8 @@ retry:
snap.xmin : snap.xmax;
/*
- * If the tuple is locked, wait for locking transaction to finish
- * and retry.
+ * If the tuple is locked, wait for locking transaction to finish and
+ * retry.
*/
if (TransactionIdIsValid(xwait))
{
@@ -312,7 +312,7 @@ retry:
/* Found tuple, try to lock it in the lockmode. */
if (found)
{
- Buffer buf;
+ Buffer buf;
HeapUpdateFailureData hufd;
HTSU_Result res;
HeapTupleData locktup;
@@ -324,7 +324,7 @@ retry:
res = heap_lock_tuple(rel, &locktup, GetCurrentCommandId(false),
lockmode,
LockWaitBlock,
- false /* don't follow updates */,
+ false /* don't follow updates */ ,
&buf, &hufd);
/* the tuple slot already has the buffer pinned */
ReleaseBuffer(buf);
@@ -363,10 +363,10 @@ retry:
void
ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot)
{
- bool skip_tuple = false;
- HeapTuple tuple;
- ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
- Relation rel = resultRelInfo->ri_RelationDesc;
+ bool skip_tuple = false;
+ HeapTuple tuple;
+ ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
+ Relation rel = resultRelInfo->ri_RelationDesc;
/* For now we support only tables. */
Assert(rel->rd_rel->relkind == RELKIND_RELATION);
@@ -379,7 +379,7 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot)
{
slot = ExecBRInsertTriggers(estate, resultRelInfo, slot);
- if (slot == NULL) /* "do nothing" */
+ if (slot == NULL) /* "do nothing" */
skip_tuple = true;
}
@@ -420,10 +420,10 @@ void
ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
TupleTableSlot *searchslot, TupleTableSlot *slot)
{
- bool skip_tuple = false;
- HeapTuple tuple;
- ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
- Relation rel = resultRelInfo->ri_RelationDesc;
+ bool skip_tuple = false;
+ HeapTuple tuple;
+ ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
+ Relation rel = resultRelInfo->ri_RelationDesc;
/* For now we support only tables. */
Assert(rel->rd_rel->relkind == RELKIND_RELATION);
@@ -438,7 +438,7 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
&searchslot->tts_tuple->t_self,
NULL, slot);
- if (slot == NULL) /* "do nothing" */
+ if (slot == NULL) /* "do nothing" */
skip_tuple = true;
}
@@ -482,9 +482,9 @@ void
ExecSimpleRelationDelete(EState *estate, EPQState *epqstate,
TupleTableSlot *searchslot)
{
- bool skip_tuple = false;
- ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
- Relation rel = resultRelInfo->ri_RelationDesc;
+ bool skip_tuple = false;
+ ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
+ Relation rel = resultRelInfo->ri_RelationDesc;
/* For now we support only tables. */
Assert(rel->rd_rel->relkind == RELKIND_RELATION);
@@ -568,6 +568,6 @@ CheckSubscriptionRelkind(char relkind, const char *nspname,
if (relkind != RELKIND_RELATION)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("logical replication target relation \"%s.%s\" is not a table",
- nspname, relname)));
+ errmsg("logical replication target relation \"%s.%s\" is not a table",
+ nspname, relname)));
}

src/backend/executor/execUtils.c

@@ -826,14 +826,14 @@ void
ExecLockNonLeafAppendTables(List *partitioned_rels, EState *estate)
{
PlannedStmt *stmt = estate->es_plannedstmt;
- ListCell *lc;
+ ListCell *lc;
foreach(lc, partitioned_rels)
{
ListCell *l;
- Index rti = lfirst_int(lc);
- bool is_result_rel = false;
- Oid relid = getrelid(rti, estate->es_range_table);
+ Index rti = lfirst_int(lc);
+ bool is_result_rel = false;
+ Oid relid = getrelid(rti, estate->es_range_table);
/* If this is a result relation, already locked in InitPlan */
foreach(l, stmt->nonleafResultRelations)

src/backend/executor/nodeAgg.c

@@ -471,7 +471,7 @@ typedef struct AggStatePerGroupData
* NULL and not auto-replace it with a later input value. Only the first
* non-NULL input will be auto-substituted.
*/
-} AggStatePerGroupData;
+} AggStatePerGroupData;
/*
* AggStatePerPhaseData - per-grouping-set-phase state
@@ -515,7 +515,7 @@ typedef struct AggStatePerHashData
AttrNumber *hashGrpColIdxInput; /* hash col indices in input slot */
AttrNumber *hashGrpColIdxHash; /* indices in hashtbl tuples */
Agg *aggnode; /* original Agg node, for numGroups etc. */
-} AggStatePerHashData;
+} AggStatePerHashData;
static void select_current_set(AggState *aggstate, int setno, bool is_hash);

src/backend/executor/nodeAppend.c

@@ -129,8 +129,8 @@ ExecInitAppend(Append *node, EState *estate, int eflags)
Assert(!(eflags & EXEC_FLAG_MARK));
/*
- * Lock the non-leaf tables in the partition tree controlled by this
- * node. It's a no-op for non-partitioned parent tables.
+ * Lock the non-leaf tables in the partition tree controlled by this node.
+ * It's a no-op for non-partitioned parent tables.
*/
ExecLockNonLeafAppendTables(node->partitioned_rels, estate);

src/backend/executor/nodeBitmapHeapscan.c

@@ -506,8 +506,9 @@ BitmapAdjustPrefetchIterator(BitmapHeapScanState *node,
* In case of shared mode, we can not ensure that the current
* blockno of the main iterator and that of the prefetch iterator
* are same. It's possible that whatever blockno we are
- * prefetching will be processed by another process. Therefore, we
- * don't validate the blockno here as we do in non-parallel case.
+ * prefetching will be processed by another process. Therefore,
+ * we don't validate the blockno here as we do in non-parallel
+ * case.
*/
if (prefetch_iterator)
tbm_shared_iterate(prefetch_iterator);

src/backend/executor/nodeGather.c

@@ -225,7 +225,7 @@ ExecGather(GatherState *node)
void
ExecEndGather(GatherState *node)
{
- ExecEndNode(outerPlanState(node)); /* let children clean up first */
+ ExecEndNode(outerPlanState(node)); /* let children clean up first */
ExecShutdownGather(node);
ExecFreeExprContext(&node->ps);
ExecClearTuple(node->ps.ps_ResultTupleSlot);

src/backend/executor/nodeGatherMerge.c

@@ -35,7 +35,7 @@ typedef struct GMReaderTupleBuffer
int readCounter;
int nTuples;
bool done;
-} GMReaderTupleBuffer;
+} GMReaderTupleBuffer;
/*
* When we read tuples from workers, it's a good idea to read several at once
@@ -230,17 +230,17 @@ ExecGatherMerge(GatherMergeState *node)
ResetExprContext(econtext);
/*
- * Get next tuple, either from one of our workers, or by running the
- * plan ourselves.
+ * Get next tuple, either from one of our workers, or by running the plan
+ * ourselves.
*/
slot = gather_merge_getnext(node);
if (TupIsNull(slot))
return NULL;
/*
- * form the result tuple using ExecProject(), and return it --- unless
- * the projection produces an empty set, in which case we must loop
- * back around for another tuple
+ * form the result tuple using ExecProject(), and return it --- unless the
+ * projection produces an empty set, in which case we must loop back
+ * around for another tuple
*/
econtext->ecxt_outertuple = slot;
return ExecProject(node->ps.ps_ProjInfo);
@@ -255,7 +255,7 @@ ExecGatherMerge(GatherMergeState *node)
void
ExecEndGatherMerge(GatherMergeState *node)
{
- ExecEndNode(outerPlanState(node)); /* let children clean up first */
+ ExecEndNode(outerPlanState(node)); /* let children clean up first */
ExecShutdownGatherMerge(node);
ExecFreeExprContext(&node->ps);
ExecClearTuple(node->ps.ps_ResultTupleSlot);
@@ -534,8 +534,8 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
HeapTuple tup = NULL;
/*
- * If we're being asked to generate a tuple from the leader, then we
- * just call ExecProcNode as normal to produce one.
+ * If we're being asked to generate a tuple from the leader, then we just
+ * call ExecProcNode as normal to produce one.
*/
if (gm_state->nreaders == reader)
{
@@ -582,8 +582,8 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
&tuple_buffer->done));
/*
- * Attempt to read more tuples in nowait mode and store them in
- * the tuple array.
+ * Attempt to read more tuples in nowait mode and store them in the
+ * tuple array.
*/
if (HeapTupleIsValid(tup))
form_tuple_array(gm_state, reader);

src/backend/executor/nodeMergeAppend.c

@@ -72,8 +72,8 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags)
Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
/*
- * Lock the non-leaf tables in the partition tree controlled by this
- * node. It's a no-op for non-partitioned parent tables.
+ * Lock the non-leaf tables in the partition tree controlled by this node.
+ * It's a no-op for non-partitioned parent tables.
*/
ExecLockNonLeafAppendTables(node->partitioned_rels, estate);

src/backend/executor/nodeModifyTable.c

@@ -1328,7 +1328,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
static void
fireBSTriggers(ModifyTableState *node)
{
- ResultRelInfo *resultRelInfo = node->resultRelInfo;
+ ResultRelInfo *resultRelInfo = node->resultRelInfo;
/*
* If the node modifies a partitioned table, we must fire its triggers.
@@ -1364,7 +1364,7 @@ fireBSTriggers(ModifyTableState *node)
static void
fireASTriggers(ModifyTableState *node)
{
- ResultRelInfo *resultRelInfo = node->resultRelInfo;
+ ResultRelInfo *resultRelInfo = node->resultRelInfo;
/*
* If the node modifies a partitioned table, we must fire its triggers.
@@ -1676,7 +1676,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
/* If modifying a partitioned table, initialize the root table info */
if (node->rootResultRelIndex >= 0)
mtstate->rootResultRelInfo = estate->es_root_result_relations +
- node->rootResultRelIndex;
+ node->rootResultRelIndex;
mtstate->mt_arowmarks = (List **) palloc0(sizeof(List *) * nplans);
mtstate->mt_nplans = nplans;
@@ -1753,12 +1753,12 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
/* The root table RT index is at the head of the partitioned_rels list */
if (node->partitioned_rels)
{
- Index root_rti;
- Oid root_oid;
+ Index root_rti;
+ Oid root_oid;
root_rti = linitial_int(node->partitioned_rels);
root_oid = getrelid(root_rti, estate->es_range_table);
- rel = heap_open(root_oid, NoLock); /* locked by InitPlan */
+ rel = heap_open(root_oid, NoLock); /* locked by InitPlan */
}
else
rel = mtstate->resultRelInfo->ri_RelationDesc;
@@ -1815,15 +1815,15 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
}
/*
- * Build WITH CHECK OPTION constraints for each leaf partition rel.
- * Note that we didn't build the withCheckOptionList for each partition
- * within the planner, but simple translation of the varattnos for each
- * partition will suffice. This only occurs for the INSERT case;
- * UPDATE/DELETE cases are handled above.
+ * Build WITH CHECK OPTION constraints for each leaf partition rel. Note
+ * that we didn't build the withCheckOptionList for each partition within
+ * the planner, but simple translation of the varattnos for each partition
+ * will suffice. This only occurs for the INSERT case; UPDATE/DELETE
+ * cases are handled above.
*/
if (node->withCheckOptionLists != NIL && mtstate->mt_num_partitions > 0)
{
- List *wcoList;
+ List *wcoList;
Assert(operation == CMD_INSERT);
resultRelInfo = mtstate->mt_partitions;

src/backend/executor/nodeProjectSet.c

@@ -120,7 +120,7 @@ ExecProjectSRF(ProjectSetState *node, bool continuing)
{
TupleTableSlot *resultSlot = node->ps.ps_ResultTupleSlot;
ExprContext *econtext = node->ps.ps_ExprContext;
- bool hassrf PG_USED_FOR_ASSERTS_ONLY;
+ bool hassrf PG_USED_FOR_ASSERTS_ONLY;
bool hasresult;
int argno;

src/backend/executor/nodeSetOp.c

@@ -64,7 +64,7 @@ typedef struct SetOpStatePerGroupData
{
long numLeft; /* number of left-input dups in group */
long numRight; /* number of right-input dups in group */
-} SetOpStatePerGroupData;
+} SetOpStatePerGroupData;
static TupleTableSlot *setop_retrieve_direct(SetOpState *setopstate);

src/backend/executor/nodeTableFuncscan.c

@@ -288,7 +288,7 @@ tfuncFetchRows(TableFuncScanState *tstate, ExprContext *econtext)
PG_TRY();
{
routine->InitOpaque(tstate,
- tstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor->natts);
+ tstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor->natts);
/*
* If evaluating the document expression returns NULL, the table
@@ -343,7 +343,7 @@ tfuncInitialize(TableFuncScanState *tstate, ExprContext *econtext, Datum doc)
int colno;
Datum value;
int ordinalitycol =
- ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol;
+ ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol;
/*
* Install the document as a possibly-toasted Datum into the tablefunc
@@ -443,8 +443,8 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext)
ExecClearTuple(tstate->ss.ss_ScanTupleSlot);
/*
- * Obtain the value of each column for this row, installing them into the
- * slot; then add the tuple to the tuplestore.
+ * Obtain the value of each column for this row, installing them into
+ * the slot; then add the tuple to the tuplestore.
*/
for (colno = 0; colno < natts; colno++)
{
@@ -456,12 +456,12 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext)
}
else
{
- bool isnull;
+ bool isnull;
values[colno] = routine->GetValue(tstate,
colno,
- tupdesc->attrs[colno]->atttypid,
- tupdesc->attrs[colno]->atttypmod,
+ tupdesc->attrs[colno]->atttypid,
+ tupdesc->attrs[colno]->atttypmod,
&isnull);
/* No value? Evaluate and apply the default, if any */
@@ -479,7 +479,7 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext)
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
errmsg("null is not allowed in column \"%s\"",
- NameStr(tupdesc->attrs[colno]->attname))));
+ NameStr(tupdesc->attrs[colno]->attname))));
nulls[colno] = isnull;
}

src/backend/executor/spi.c

@@ -1230,7 +1230,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
{
if (list_length(stmt_list) == 1 &&
- linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
+ linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
linitial_node(PlannedStmt, stmt_list)->rowMarks == NIL &&
ExecSupportsBackwardScan(linitial_node(PlannedStmt, stmt_list)->planTree))
portal->cursorOptions |= CURSOR_OPT_SCROLL;
@@ -1246,7 +1246,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
if (portal->cursorOptions & CURSOR_OPT_SCROLL)
{
if (list_length(stmt_list) == 1 &&
- linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
+ linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
linitial_node(PlannedStmt, stmt_list)->rowMarks != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -1990,8 +1990,8 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
stmt_list = pg_analyze_and_rewrite_params(parsetree,
src,
plan->parserSetup,
- plan->parserSetupArg,
- _SPI_current->queryEnv);
+ plan->parserSetupArg,
+ _SPI_current->queryEnv);
}
else
{
@@ -2668,7 +2668,7 @@ SPI_register_relation(EphemeralNamedRelation enr)
if (enr == NULL || enr->md.name == NULL)
return SPI_ERROR_ARGUMENT;
- res = _SPI_begin_call(false); /* keep current memory context */
+ res = _SPI_begin_call(false); /* keep current memory context */
if (res < 0)
return res;
@@ -2702,7 +2702,7 @@ SPI_unregister_relation(const char *name)
if (name == NULL)
return SPI_ERROR_ARGUMENT;
- res = _SPI_begin_call(false); /* keep current memory context */
+ res = _SPI_begin_call(false); /* keep current memory context */
if (res < 0)
return res;
@@ -2735,8 +2735,8 @@ SPI_register_trigger_data(TriggerData *tdata)
if (tdata->tg_newtable)
{
EphemeralNamedRelation enr =
- palloc(sizeof(EphemeralNamedRelationData));
- int rc;
+ palloc(sizeof(EphemeralNamedRelationData));
+ int rc;
enr->md.name = tdata->tg_trigger->tgnewtable;
enr->md.reliddesc = tdata->tg_relation->rd_id;
@@ -2752,8 +2752,8 @@ SPI_register_trigger_data(TriggerData *tdata)
if (tdata->tg_oldtable)
{
EphemeralNamedRelation enr =
- palloc(sizeof(EphemeralNamedRelationData));
- int rc;
+ palloc(sizeof(EphemeralNamedRelationData));
+ int rc;
enr->md.name = tdata->tg_trigger->tgoldtable;
enr->md.reliddesc = tdata->tg_relation->rd_id;