Mirror of https://github.com/postgres/postgres.git, synced 2025-09-02 04:21:28 +03:00
8.4 pgindent run, with new combined Linux/FreeBSD/MinGW typedef list
provided by Andrew.
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/executor/execCurrent.c,v 1.9 2009/01/01 17:23:41 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/execCurrent.c,v 1.10 2009/06/11 14:48:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -138,8 +138,8 @@ execCurrentOf(CurrentOfExpr *cexpr,

/*
* This table didn't produce the cursor's current row; some other
* inheritance child of the same parent must have. Signal caller
* to do nothing on this table.
* inheritance child of the same parent must have. Signal caller to
* do nothing on this table.
*/
return false;
}

@@ -26,7 +26,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.324 2009/05/07 22:58:28 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.325 2009/06/11 14:48:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -62,9 +62,9 @@

/* Hooks for plugins to get control in ExecutorStart/Run/End() */
ExecutorStart_hook_type ExecutorStart_hook = NULL;
ExecutorRun_hook_type ExecutorRun_hook = NULL;
ExecutorEnd_hook_type ExecutorEnd_hook = NULL;
ExecutorStart_hook_type ExecutorStart_hook = NULL;
ExecutorRun_hook_type ExecutorRun_hook = NULL;
ExecutorEnd_hook_type ExecutorEnd_hook = NULL;

typedef struct evalPlanQual
{
@@ -552,17 +552,17 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
}

/*
* Basically the same for the mod columns, with either INSERT or UPDATE
* privilege as specified by remainingPerms.
* Basically the same for the mod columns, with either INSERT or
* UPDATE privilege as specified by remainingPerms.
*/
remainingPerms &= ~ACL_SELECT;
if (remainingPerms != 0)
{
/*
* When the query doesn't explicitly change any columns, allow
* the query if we have permission on any column of the rel. This
* is to handle SELECT FOR UPDATE as well as possible corner cases
* in INSERT and UPDATE.
* When the query doesn't explicitly change any columns, allow the
* query if we have permission on any column of the rel. This is
* to handle SELECT FOR UPDATE as well as possible corner cases in
* INSERT and UPDATE.
*/
if (bms_is_empty(rte->modifiedCols))
{
@@ -843,9 +843,9 @@ InitPlan(QueryDesc *queryDesc, int eflags)

/*
* Initialize the junk filter if needed. SELECT and INSERT queries need a
* filter if there are any junk attrs in the tlist. UPDATE and
* DELETE always need a filter, since there's always a junk 'ctid'
* attribute present --- no need to look first.
* filter if there are any junk attrs in the tlist. UPDATE and DELETE
* always need a filter, since there's always a junk 'ctid' attribute
* present --- no need to look first.
*
* This section of code is also a convenient place to verify that the
* output of an INSERT or UPDATE matches the target table(s).
@@ -1194,7 +1194,7 @@ ExecCheckPlanOutput(Relation resultRel, List *targetList)
errdetail("Table has type %s at ordinal position %d, but query expects %s.",
format_type_be(attr->atttypid),
attno,
format_type_be(exprType((Node *) tle->expr)))));
format_type_be(exprType((Node *) tle->expr)))));
}
else
{
@@ -1215,7 +1215,7 @@ ExecCheckPlanOutput(Relation resultRel, List *targetList)
if (attno != resultDesc->natts)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("table row type and query-specified row type do not match"),
errmsg("table row type and query-specified row type do not match"),
errdetail("Query has too few columns.")));
}

@@ -1547,7 +1547,7 @@ lnext: ;
/* if child rel, must check whether it produced this row */
if (erm->rti != erm->prti)
{
Oid tableoid;
Oid tableoid;

datum = ExecGetJunkAttribute(slot,
erm->toidAttNo,
@@ -1774,8 +1774,8 @@ ExecInsert(TupleTableSlot *slot,
* rowtype.
*
* XXX if we ever wanted to allow users to assign their own OIDs to new
* rows, this'd be the place to do it. For the moment, we make a point
* of doing this before calling triggers, so that a user-supplied trigger
* rows, this'd be the place to do it. For the moment, we make a point of
* doing this before calling triggers, so that a user-supplied trigger
* could hack the OID if desired.
*/
if (resultRelationDesc->rd_rel->relhasoids)
@@ -2847,7 +2847,7 @@ OpenIntoRel(QueryDesc *queryDesc)
Oid intoRelationId;
TupleDesc tupdesc;
DR_intorel *myState;
static char *validnsps[] = HEAP_RELOPT_NAMESPACES;
static char *validnsps[] = HEAP_RELOPT_NAMESPACES;

Assert(into);

@@ -2970,8 +2970,8 @@ OpenIntoRel(QueryDesc *queryDesc)
myState->rel = intoRelationDesc;

/*
* We can skip WAL-logging the insertions, unless PITR is in use. We
* can skip the FSM in any case.
* We can skip WAL-logging the insertions, unless PITR is in use. We can
* skip the FSM in any case.
*/
myState->hi_options = HEAP_INSERT_SKIP_FSM |
(XLogArchivingActive() ? 0 : HEAP_INSERT_SKIP_WAL);

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.248 2009/06/09 22:00:57 petere Exp $
* $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.249 2009/06/11 14:48:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,8 +63,8 @@ static Datum ExecEvalAggref(AggrefExprState *aggref,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalWindowFunc(WindowFuncExprState *wfunc,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalScalarVar(ExprState *exprstate, ExprContext *econtext,
@@ -78,7 +78,7 @@ static Datum ExecEvalConst(ExprState *exprstate, ExprContext *econtext,
static Datum ExecEvalParam(ExprState *exprstate, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static void init_fcache(Oid foid, FuncExprState *fcache,
MemoryContext fcacheCxt, bool needDescForSets);
MemoryContext fcacheCxt, bool needDescForSets);
static void ShutdownFuncExpr(Datum arg);
static TupleDesc get_cached_rowtype(Oid type_id, int32 typmod,
TupleDesc *cache_field, ExprContext *econtext);
@@ -617,7 +617,7 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("table row type and query-specified row type do not match"),
errdetail_plural("Table row contains %d attribute, but query expects %d.",
"Table row contains %d attributes, but query expects %d.",
"Table row contains %d attributes, but query expects %d.",
slot_tupdesc->natts,
slot_tupdesc->natts,
var_tupdesc->natts)));
@@ -1044,10 +1044,10 @@ init_fcache(Oid foid, FuncExprState *fcache,
if (list_length(fcache->args) > FUNC_MAX_ARGS)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_ARGUMENTS),
errmsg_plural("cannot pass more than %d argument to a function",
"cannot pass more than %d arguments to a function",
FUNC_MAX_ARGS,
FUNC_MAX_ARGS)));
errmsg_plural("cannot pass more than %d argument to a function",
"cannot pass more than %d arguments to a function",
FUNC_MAX_ARGS,
FUNC_MAX_ARGS)));

/* Set up the primary fmgr lookup information */
fmgr_info_cxt(foid, &(fcache->func), fcacheCxt);
@@ -1237,7 +1237,7 @@ ExecEvalFuncArgs(FunctionCallInfo fcinfo,
* ExecPrepareTuplestoreResult
*
* Subroutine for ExecMakeFunctionResult: prepare to extract rows from a
* tuplestore function result. We must set up a funcResultSlot (unless
* tuplestore function result. We must set up a funcResultSlot (unless
* already done in a previous call cycle) and verify that the function
* returned the expected tuple descriptor.
*/
@@ -1268,9 +1268,8 @@ ExecPrepareTuplestoreResult(FuncExprState *fcache,
}

/*
* If function provided a tupdesc, cross-check it. We only really
* need to do this for functions returning RECORD, but might as well
* do it always.
* If function provided a tupdesc, cross-check it. We only really need to
* do this for functions returning RECORD, but might as well do it always.
*/
if (resultDesc)
{
@@ -1316,7 +1315,7 @@ tupledesc_match(TupleDesc dst_tupdesc, TupleDesc src_tupdesc)
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function return row and query-specified return row do not match"),
errdetail_plural("Returned row contains %d attribute, but query expects %d.",
"Returned row contains %d attributes, but query expects %d.",
"Returned row contains %d attributes, but query expects %d.",
src_tupdesc->natts,
src_tupdesc->natts, dst_tupdesc->natts)));

@@ -1353,7 +1352,7 @@ tupledesc_match(TupleDesc dst_tupdesc, TupleDesc src_tupdesc)
* init_fcache is presumed already run on the FuncExprState.
*
* This function handles the most general case, wherein the function or
* one of its arguments might (or might not) return a set. If we find
* one of its arguments might (or might not) return a set. If we find
* no sets involved, we will change the FuncExprState's function pointer
* to use a simpler method on subsequent calls.
*/
@@ -1379,13 +1378,13 @@ restart:
check_stack_depth();

/*
* If a previous call of the function returned a set result in the form
* of a tuplestore, continue reading rows from the tuplestore until it's
* If a previous call of the function returned a set result in the form of
* a tuplestore, continue reading rows from the tuplestore until it's
* empty.
*/
if (fcache->funcResultStore)
{
Assert(isDone); /* it was provided before ... */
Assert(isDone); /* it was provided before ... */
if (tuplestore_gettupleslot(fcache->funcResultStore, true, false,
fcache->funcResultSlot))
{
@@ -1420,10 +1419,10 @@ restart:
* For non-set-returning functions, we just use a local-variable
* FunctionCallInfoData. For set-returning functions we keep the callinfo
* record in fcache->setArgs so that it can survive across multiple
* value-per-call invocations. (The reason we don't just do the latter
* all the time is that plpgsql expects to be able to use simple expression
* trees re-entrantly. Which might not be a good idea, but the penalty
* for not doing so is high.)
* value-per-call invocations. (The reason we don't just do the latter
* all the time is that plpgsql expects to be able to use simple
* expression trees re-entrantly. Which might not be a good idea, but the
* penalty for not doing so is high.)
*/
if (fcache->func.fn_retset)
fcinfo = &fcache->setArgs;
@@ -1534,7 +1533,7 @@ restart:
*isDone = rsinfo.isDone;

pgstat_end_function_usage(&fcusage,
rsinfo.isDone != ExprMultipleResult);
rsinfo.isDone != ExprMultipleResult);
}
else
{
@@ -1564,7 +1563,7 @@ restart:
{
RegisterExprContextCallback(econtext,
ShutdownFuncExpr,
PointerGetDatum(fcache));
PointerGetDatum(fcache));
fcache->shutdown_reg = true;
}
}
@@ -2043,9 +2042,8 @@ no_function_result:
}

/*
* If function provided a tupdesc, cross-check it. We only really
* need to do this for functions returning RECORD, but might as well
* do it always.
* If function provided a tupdesc, cross-check it. We only really need to
* do this for functions returning RECORD, but might as well do it always.
*/
if (rsinfo.setDesc)
{
@@ -3229,41 +3227,41 @@ ExecEvalXml(XmlExprState *xmlExpr, ExprContext *econtext,
break;

case IS_XMLFOREST:
{
StringInfoData buf;

initStringInfo(&buf);
forboth(arg, xmlExpr->named_args, narg, xexpr->arg_names)
{
ExprState *e = (ExprState *) lfirst(arg);
char *argname = strVal(lfirst(narg));
StringInfoData buf;

value = ExecEvalExpr(e, econtext, &isnull, NULL);
if (!isnull)
initStringInfo(&buf);
forboth(arg, xmlExpr->named_args, narg, xexpr->arg_names)
{
appendStringInfo(&buf, "<%s>%s</%s>",
argname,
map_sql_value_to_xml_value(value, exprType((Node *) e->expr), true),
argname);
*isNull = false;
ExprState *e = (ExprState *) lfirst(arg);
char *argname = strVal(lfirst(narg));

value = ExecEvalExpr(e, econtext, &isnull, NULL);
if (!isnull)
{
appendStringInfo(&buf, "<%s>%s</%s>",
argname,
map_sql_value_to_xml_value(value, exprType((Node *) e->expr), true),
argname);
*isNull = false;
}
}

if (*isNull)
{
pfree(buf.data);
return (Datum) 0;
}
else
{
text *result;

result = cstring_to_text_with_len(buf.data, buf.len);
pfree(buf.data);

return PointerGetDatum(result);
}
}

if (*isNull)
{
pfree(buf.data);
return (Datum) 0;
}
else
{
text *result;

result = cstring_to_text_with_len(buf.data, buf.len);
pfree(buf.data);

return PointerGetDatum(result);
}
}
break;

case IS_XMLELEMENT:
@@ -4095,9 +4093,9 @@ ExecEvalExprSwitchContext(ExprState *expression,
*
* Any Aggref, WindowFunc, or SubPlan nodes found in the tree are added to the
* lists of such nodes held by the parent PlanState. Otherwise, we do very
* little initialization here other than building the state-node tree. Any
* little initialization here other than building the state-node tree. Any
* nontrivial work associated with initializing runtime info for a node should
* happen during the first actual evaluation of that node. (This policy lets
* happen during the first actual evaluation of that node. (This policy lets
* us avoid work if the node is never actually evaluated.)
*
* Note: there is no ExecEndExpr function; we assume that any resource
@@ -4209,7 +4207,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
if (nfuncs != winstate->numfuncs)
ereport(ERROR,
(errcode(ERRCODE_WINDOWING_ERROR),
errmsg("window function calls cannot be nested")));
errmsg("window function calls cannot be nested")));
}
else
{
@@ -5156,11 +5154,11 @@ ExecProject(ProjectionInfo *projInfo, ExprDoneCond *isDone)
numSimpleVars = projInfo->pi_numSimpleVars;
if (numSimpleVars > 0)
{
Datum *values = slot->tts_values;
bool *isnull = slot->tts_isnull;
int *varSlotOffsets = projInfo->pi_varSlotOffsets;
int *varNumbers = projInfo->pi_varNumbers;
int i;
Datum *values = slot->tts_values;
bool *isnull = slot->tts_isnull;
int *varSlotOffsets = projInfo->pi_varSlotOffsets;
int *varNumbers = projInfo->pi_varNumbers;
int i;

if (projInfo->pi_directMap)
{
@@ -5178,7 +5176,7 @@ ExecProject(ProjectionInfo *projInfo, ExprDoneCond *isDone)
else
{
/* we have to pay attention to varOutputCols[] */
int *varOutputCols = projInfo->pi_varOutputCols;
int *varOutputCols = projInfo->pi_varOutputCols;

for (i = 0; i < numSimpleVars; i++)
{
@@ -5195,9 +5193,9 @@ ExecProject(ProjectionInfo *projInfo, ExprDoneCond *isDone)

/*
* If there are any generic expressions, evaluate them. It's possible
* that there are set-returning functions in such expressions; if so
* and we have reached the end of the set, we return the result slot,
* which we already marked empty.
* that there are set-returning functions in such expressions; if so and
* we have reached the end of the set, we return the result slot, which we
* already marked empty.
*/
if (projInfo->pi_targetlist)
{

@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.106 2009/03/30 04:08:43 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.107 2009/06/11 14:48:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -756,9 +756,9 @@ ExecFetchSlotMinimalTuple(TupleTableSlot *slot)

/*
* Note: we may now have a situation where we have a local minimal tuple
* attached to a virtual or non-local physical tuple. There seems no
* harm in that at the moment, but if any materializes, we should change
* this function to force the slot into minimal-tuple-only state.
* attached to a virtual or non-local physical tuple. There seems no harm
* in that at the moment, but if any materializes, we should change this
* function to force the slot into minimal-tuple-only state.
*/

return slot->tts_mintuple;
@@ -843,9 +843,9 @@ ExecMaterializeSlot(TupleTableSlot *slot)
slot->tts_buffer = InvalidBuffer;

/*
* Mark extracted state invalid. This is important because the slot
* is not supposed to depend any more on the previous external data;
* we mustn't leave any dangling pass-by-reference datums in tts_values.
* Mark extracted state invalid. This is important because the slot is
* not supposed to depend any more on the previous external data; we
* mustn't leave any dangling pass-by-reference datums in tts_values.
* However, we have not actually invalidated any such datums, if there
* happen to be any previously fetched from the slot. (Note in particular
* that we have not pfree'd tts_mintuple, if there is one.)
@@ -854,9 +854,9 @@ ExecMaterializeSlot(TupleTableSlot *slot)

/*
* On the same principle of not depending on previous remote storage,
* forget the mintuple if it's not local storage. (If it is local storage,
* we must not pfree it now, since callers might have already fetched
* datum pointers referencing it.)
* forget the mintuple if it's not local storage. (If it is local
* storage, we must not pfree it now, since callers might have already
* fetched datum pointers referencing it.)
*/
if (!slot->tts_shouldFreeMin)
slot->tts_mintuple = NULL;

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.158 2009/04/02 22:39:30 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.159 2009/06/11 14:48:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -584,8 +584,8 @@ ExecBuildProjectionInfo(List *targetList,

/*
* We separate the target list elements into simple Var references and
* expressions which require the full ExecTargetList machinery. To be
* a simple Var, a Var has to be a user attribute and not mismatch the
* expressions which require the full ExecTargetList machinery. To be a
* simple Var, a Var has to be a user attribute and not mismatch the
* inputDesc. (Note: if there is a type mismatch then ExecEvalVar will
* probably throw an error at runtime, but we leave that to it.)
*/
@@ -621,7 +621,7 @@ ExecBuildProjectionInfo(List *targetList,

varNumbers[numSimpleVars] = attnum;
varOutputCols[numSimpleVars] = tle->resno;
if (tle->resno != numSimpleVars+1)
if (tle->resno != numSimpleVars + 1)
directMap = false;

switch (variable->varno)
@@ -683,7 +683,7 @@ get_last_attnums(Node *node, ProjectionInfo *projInfo)
return false;
if (IsA(node, Var))
{
Var *variable = (Var *) node;
Var *variable = (Var *) node;
AttrNumber attnum = variable->varattno;

switch (variable->varno)
@@ -705,9 +705,10 @@ get_last_attnums(Node *node, ProjectionInfo *projInfo)
}
return false;
}

/*
* Don't examine the arguments of Aggrefs or WindowFuncs, because those
* do not represent expressions to be evaluated within the overall
* Don't examine the arguments of Aggrefs or WindowFuncs, because those do
* not represent expressions to be evaluated within the overall
* targetlist's econtext.
*/
if (IsA(node, Aggref))

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.133 2009/03/27 18:30:21 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.134 2009/06/11 14:48:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -101,8 +101,8 @@ typedef SQLFunctionCache *SQLFunctionCachePtr;

/* non-export function prototypes */
static execution_state *init_execution_state(List *queryTree_list,
SQLFunctionCachePtr fcache,
bool lazyEvalOK);
SQLFunctionCachePtr fcache,
bool lazyEvalOK);
static void init_sql_fcache(FmgrInfo *finfo, bool lazyEvalOK);
static void postquel_start(execution_state *es, SQLFunctionCachePtr fcache);
static bool postquel_getnext(execution_state *es, SQLFunctionCachePtr fcache);
@@ -168,8 +168,8 @@ init_execution_state(List *queryTree_list,

newes->next = NULL;
newes->status = F_EXEC_START;
newes->setsResult = false; /* might change below */
newes->lazyEval = false; /* might change below */
newes->setsResult = false; /* might change below */
newes->lazyEval = false; /* might change below */
newes->stmt = stmt;
newes->qd = NULL;

@@ -180,9 +180,9 @@ init_execution_state(List *queryTree_list,
}

/*
* Mark the last canSetTag query as delivering the function result;
* then, if it is a plain SELECT, mark it for lazy evaluation.
* If it's not a SELECT we must always run it to completion.
* Mark the last canSetTag query as delivering the function result; then,
* if it is a plain SELECT, mark it for lazy evaluation. If it's not a
* SELECT we must always run it to completion.
*
* Note: at some point we might add additional criteria for whether to use
* lazy eval. However, we should prefer to use it whenever the function
@@ -191,8 +191,8 @@ init_execution_state(List *queryTree_list,
*
* Note: don't set setsResult if the function returns VOID, as evidenced
* by not having made a junkfilter. This ensures we'll throw away any
* output from a utility statement that check_sql_fn_retval deemed to
* not have output.
* output from a utility statement that check_sql_fn_retval deemed to not
* have output.
*/
if (lasttages && fcache->junkFilter)
{
@@ -326,10 +326,10 @@ init_sql_fcache(FmgrInfo *finfo, bool lazyEvalOK)
* Note: we set fcache->returnsTuple according to whether we are returning
* the whole tuple result or just a single column. In the latter case we
* clear returnsTuple because we need not act different from the scalar
* result case, even if it's a rowtype column. (However, we have to
* force lazy eval mode in that case; otherwise we'd need extra code to
* expand the rowtype column into multiple columns, since we have no
* way to notify the caller that it should do that.)
* result case, even if it's a rowtype column. (However, we have to force
* lazy eval mode in that case; otherwise we'd need extra code to expand
* the rowtype column into multiple columns, since we have no way to
* notify the caller that it should do that.)
*
* check_sql_fn_retval will also construct a JunkFilter we can use to
* coerce the returned rowtype to the desired form (unless the result type
@@ -459,7 +459,7 @@ postquel_getnext(execution_state *es, SQLFunctionCachePtr fcache)
es->qd->utilitystmt),
fcache->src,
es->qd->params,
false, /* not top level */
false, /* not top level */
es->qd->dest,
NULL);
result = true; /* never stops early */
@@ -566,7 +566,7 @@ postquel_get_single_result(TupleTableSlot *slot,
/*
* Set up to return the function value. For pass-by-reference datatypes,
* be sure to allocate the result in resultcontext, not the current memory
* context (which has query lifespan). We can't leave the data in the
* context (which has query lifespan). We can't leave the data in the
* TupleTableSlot because we intend to clear the slot before returning.
*/
oldcontext = MemoryContextSwitchTo(resultcontext);
@@ -670,8 +670,8 @@ fmgr_sql(PG_FUNCTION_ARGS)
postquel_sub_params(fcache, fcinfo);

/*
* Build tuplestore to hold results, if we don't have one already.
* Note it's in the query-lifespan context.
* Build tuplestore to hold results, if we don't have one already. Note
* it's in the query-lifespan context.
*/
if (!fcache->tstore)
fcache->tstore = tuplestore_begin_heap(randomAccess, false, work_mem);
@@ -688,7 +688,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
*/
while (es)
{
bool completed;
bool completed;

if (es->status == F_EXEC_START)
postquel_start(es, fcache);
@@ -696,22 +696,22 @@ fmgr_sql(PG_FUNCTION_ARGS)
completed = postquel_getnext(es, fcache);

/*
* If we ran the command to completion, we can shut it down now.
* Any row(s) we need to return are safely stashed in the tuplestore,
* and we want to be sure that, for example, AFTER triggers get fired
* If we ran the command to completion, we can shut it down now. Any
* row(s) we need to return are safely stashed in the tuplestore, and
* we want to be sure that, for example, AFTER triggers get fired
* before we return anything. Also, if the function doesn't return
* set, we can shut it down anyway because it must be a SELECT and
* we don't care about fetching any more result rows.
* set, we can shut it down anyway because it must be a SELECT and we
* don't care about fetching any more result rows.
*/
if (completed || !fcache->returnsSet)
postquel_end(es);

/*
* Break from loop if we didn't shut down (implying we got a
* lazily-evaluated row). Otherwise we'll press on till the
* whole function is done, relying on the tuplestore to keep hold
* of the data to eventually be returned. This is necessary since
* an INSERT/UPDATE/DELETE RETURNING that sets the result might be
* lazily-evaluated row). Otherwise we'll press on till the whole
* function is done, relying on the tuplestore to keep hold of the
* data to eventually be returned. This is necessary since an
* INSERT/UPDATE/DELETE RETURNING that sets the result might be
* followed by additional rule-inserted commands, and we want to
* finish doing all those commands before we return anything.
*/
@@ -730,7 +730,8 @@ fmgr_sql(PG_FUNCTION_ARGS)
if (es)
{
/*
* If we stopped short of being done, we must have a lazy-eval row.
* If we stopped short of being done, we must have a lazy-eval
* row.
*/
Assert(es->lazyEval);
/* Re-use the junkfilter's output slot to fetch back the tuple */
@@ -765,7 +766,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
else if (fcache->lazyEval)
{
/*
* We are done with a lazy evaluation. Clean up.
* We are done with a lazy evaluation. Clean up.
*/
tuplestore_clear(fcache->tstore);

@@ -789,9 +790,9 @@ fmgr_sql(PG_FUNCTION_ARGS)
else
{
/*
* We are done with a non-lazy evaluation. Return whatever is
* in the tuplestore. (It is now caller's responsibility to
* free the tuplestore when done.)
* We are done with a non-lazy evaluation. Return whatever is in
* the tuplestore. (It is now caller's responsibility to free the
* tuplestore when done.)
*/
rsi->returnMode = SFRM_Materialize;
rsi->setResult = fcache->tstore;
@@ -844,8 +845,8 @@ fmgr_sql(PG_FUNCTION_ARGS)
}

/*
* If we've gone through every command in the function, we are done.
* Reset the execution states to start over again on next call.
* If we've gone through every command in the function, we are done. Reset
* the execution states to start over again on next call.
*/
if (es == NULL)
{
@@ -997,7 +998,7 @@ ShutdownSQLFunction(Datum arg)
* function definition of a polymorphic function.)
*
* This function returns true if the sql function returns the entire tuple
* result of its final statement, and false otherwise. Note that because we
* result of its final statement, and false otherwise. Note that because we
* allow "SELECT rowtype_expression", this may be false even when the declared
* function return type is a rowtype.
*
@@ -1029,14 +1030,14 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
*junkFilter = NULL; /* initialize in case of VOID result */

/*
* Find the last canSetTag query in the list. This isn't necessarily
* the last parsetree, because rule rewriting can insert queries after
* what the user wrote.
* Find the last canSetTag query in the list. This isn't necessarily the
* last parsetree, because rule rewriting can insert queries after what
* the user wrote.
*/
parse = NULL;
foreach(lc, queryTreeList)
{
Query *q = (Query *) lfirst(lc);
Query *q = (Query *) lfirst(lc);

if (q->canSetTag)
parse = q;
@@ -1044,12 +1045,12 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,

/*
* If it's a plain SELECT, it returns whatever the targetlist says.
* Otherwise, if it's INSERT/UPDATE/DELETE with RETURNING, it returns that.
* Otherwise, the function return type must be VOID.
* Otherwise, if it's INSERT/UPDATE/DELETE with RETURNING, it returns
* that. Otherwise, the function return type must be VOID.
*
* Note: eventually replace this test with QueryReturnsTuples? We'd need
* a more general method of determining the output type, though. Also,
* it seems too dangerous to consider FETCH or EXECUTE as returning a
* a more general method of determining the output type, though. Also, it
* seems too dangerous to consider FETCH or EXECUTE as returning a
* determinable rowtype, since they depend on relatively short-lived
* entities.
*/
@@ -1076,7 +1077,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
errdetail("Function's final statement must be SELECT or INSERT/UPDATE/DELETE RETURNING.")));
errdetail("Function's final statement must be SELECT or INSERT/UPDATE/DELETE RETURNING.")));
return false;
}

@@ -1112,7 +1113,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
errdetail("Final statement must return exactly one column.")));
errdetail("Final statement must return exactly one column.")));

/* We assume here that non-junk TLEs must come first in tlists */
tle = (TargetEntry *) linitial(tlist);
@@ -1148,8 +1149,8 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
* If the target list is of length 1, and the type of the varnode in
* the target list matches the declared return type, this is okay.
* This can happen, for example, where the body of the function is
* 'SELECT func2()', where func2 has the same composite return type
* as the function that's calling it.
* 'SELECT func2()', where func2 has the same composite return type as
* the function that's calling it.
*/
if (tlistlen == 1)
{
@@ -1211,7 +1212,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
errdetail("Final statement returns too many columns.")));
errdetail("Final statement returns too many columns.")));
attr = tupdesc->attrs[colindex - 1];
} while (attr->attisdropped);
tuplogcols++;

@@ -21,7 +21,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.34 2009/01/12 16:00:41 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.35 2009/06/11 14:48:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -114,17 +114,17 @@ BitmapHeapNext(BitmapHeapScanState *node)
}

/*
* If we haven't yet performed the underlying index scan, do it, and
* begin the iteration over the bitmap.
* If we haven't yet performed the underlying index scan, do it, and begin
* the iteration over the bitmap.
*
* For prefetching, we use *two* iterators, one for the pages we are
* actually scanning and another that runs ahead of the first for
* prefetching. node->prefetch_pages tracks exactly how many pages
* ahead the prefetch iterator is. Also, node->prefetch_target tracks
* the desired prefetch distance, which starts small and increases up
* to the GUC-controlled maximum, target_prefetch_pages. This is to
* avoid doing a lot of prefetching in a scan that stops after a few
* tuples because of a LIMIT.
* prefetching. node->prefetch_pages tracks exactly how many pages ahead
* the prefetch iterator is. Also, node->prefetch_target tracks the
* desired prefetch distance, which starts small and increases up to the
* GUC-controlled maximum, target_prefetch_pages. This is to avoid doing
* a lot of prefetching in a scan that stops after a few tuples because of
* a LIMIT.
*/
if (tbm == NULL)
{
@@ -144,7 +144,7 @@ BitmapHeapNext(BitmapHeapScanState *node)
node->prefetch_pages = 0;
node->prefetch_target = -1;
}
#endif /* USE_PREFETCH */
#endif /* USE_PREFETCH */
}

for (;;)
@@ -178,7 +178,7 @@ BitmapHeapNext(BitmapHeapScanState *node)
if (tbmpre == NULL || tbmpre->blockno != tbmres->blockno)
elog(ERROR, "prefetch and main iterators are out of sync");
}
#endif /* USE_PREFETCH */
#endif /* USE_PREFETCH */

/*
* Ignore any claimed entries past what we think is the end of the
@@ -203,21 +203,22 @@ BitmapHeapNext(BitmapHeapScanState *node)
scan->rs_cindex = 0;

#ifdef USE_PREFETCH

/*
* Increase prefetch target if it's not yet at the max. Note
* that we will increase it to zero after fetching the very
* first page/tuple, then to one after the second tuple is
* fetched, then it doubles as later pages are fetched.
* Increase prefetch target if it's not yet at the max. Note that
* we will increase it to zero after fetching the very first
* page/tuple, then to one after the second tuple is fetched, then
* it doubles as later pages are fetched.
*/
if (node->prefetch_target >= target_prefetch_pages)
/* don't increase any further */ ;
/* don't increase any further */ ;
else if (node->prefetch_target >= target_prefetch_pages / 2)
node->prefetch_target = target_prefetch_pages;
else if (node->prefetch_target > 0)
node->prefetch_target *= 2;
else
node->prefetch_target++;
#endif /* USE_PREFETCH */
#endif /* USE_PREFETCH */
}
else
{
@@ -227,13 +228,14 @@ BitmapHeapNext(BitmapHeapScanState *node)
scan->rs_cindex++;

#ifdef USE_PREFETCH

/*
* Try to prefetch at least a few pages even before we get to the
* second page if we don't stop reading after the first tuple.
*/
if (node->prefetch_target < target_prefetch_pages)
node->prefetch_target++;
#endif /* USE_PREFETCH */
#endif /* USE_PREFETCH */
}

/*
@@ -246,12 +248,13 @@ BitmapHeapNext(BitmapHeapScanState *node)
}

#ifdef USE_PREFETCH

/*
* We issue prefetch requests *after* fetching the current page
* to try to avoid having prefetching interfere with the main I/O.
* Also, this should happen only when we have determined there is
* still something to do on the current page, else we may uselessly
* prefetch the same page we are just about to request for real.
* We issue prefetch requests *after* fetching the current page to try
* to avoid having prefetching interfere with the main I/O. Also, this
* should happen only when we have determined there is still something
* to do on the current page, else we may uselessly prefetch the same
* page we are just about to request for real.
*/
if (prefetch_iterator)
{
@@ -270,7 +273,7 @@ BitmapHeapNext(BitmapHeapScanState *node)
PrefetchBuffer(scan->rs_rd, MAIN_FORKNUM, tbmpre->blockno);
}
}
#endif /* USE_PREFETCH */
#endif /* USE_PREFETCH */

/*
* Okay to fetch the tuple

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.29 2009/01/01 17:23:41 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.30 2009/06/11 14:48:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -94,7 +94,7 @@ MultiExecBitmapIndexScan(BitmapIndexScanState *node)

doscan = ExecIndexAdvanceArrayKeys(node->biss_ArrayKeys,
node->biss_NumArrayKeys);
if (doscan) /* reset index scan */
if (doscan) /* reset index scan */
index_rescan(node->biss_ScanDesc, node->biss_ScanKeys);
}

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeCtescan.c,v 1.4 2009/03/27 18:30:21 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeCtescan.c,v 1.5 2009/06/11 14:48:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -73,8 +73,8 @@ CteScanNext(CteScanState *node)
* If we can fetch another tuple from the tuplestore, return it.
*
* Note: we have to use copy=true in the tuplestore_gettupleslot call,
* because we are sharing the tuplestore with other nodes that might
* write into the tuplestore before we get called again.
* because we are sharing the tuplestore with other nodes that might write
* into the tuplestore before we get called again.
*/
if (!eof_tuplestore)
{
@@ -111,16 +111,16 @@ CteScanNext(CteScanState *node)
* Append a copy of the returned tuple to tuplestore. NOTE: because
* our read pointer is certainly in EOF state, its read position will
* move forward over the added tuple. This is what we want. Also,
* any other readers will *not* move past the new tuple, which is
* what they want.
* any other readers will *not* move past the new tuple, which is what
* they want.
*/
tuplestore_puttupleslot(tuplestorestate, cteslot);

/*
* We MUST copy the CTE query's output tuple into our own slot.
* This is because other CteScan nodes might advance the CTE query
* before we are called again, and our output tuple must stay
* stable over that.
* We MUST copy the CTE query's output tuple into our own slot. This
* is because other CteScan nodes might advance the CTE query before
* we are called again, and our output tuple must stay stable over
* that.
*/
return ExecCopySlot(slot, cteslot);
}
@@ -193,10 +193,10 @@ ExecInitCteScan(CteScan *node, EState *estate, int eflags)
node->ctePlanId - 1);

/*
* The Param slot associated with the CTE query is used to hold a
* pointer to the CteState of the first CteScan node that initializes
* for this CTE. This node will be the one that holds the shared
* state for all the CTEs.
* The Param slot associated with the CTE query is used to hold a pointer
* to the CteState of the first CteScan node that initializes for this
* CTE. This node will be the one that holds the shared state for all the
* CTEs.
*/
prmdata = &(estate->es_param_exec_vals[node->cteParam]);
Assert(prmdata->execPlan == NULL);
@@ -315,8 +315,8 @@ ExecCteScanReScan(CteScanState *node, ExprContext *exprCtxt)
if (node->leader == node)
{
/*
* The leader is responsible for clearing the tuplestore if a new
* scan of the underlying CTE is required.
* The leader is responsible for clearing the tuplestore if a new scan
* of the underlying CTE is required.
*/
if (node->cteplanstate->chgParam != NULL)
{

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeFunctionscan.c,v 1.51 2009/03/27 18:30:21 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeFunctionscan.c,v 1.52 2009/06/11 14:48:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -277,7 +277,7 @@ ExecFunctionReScan(FunctionScanState *node, ExprContext *exprCtxt)
/*
* Here we have a choice whether to drop the tuplestore (and recompute the
* function outputs) or just rescan it. We must recompute if the
* expression contains parameters, else we rescan. XXX maybe we should
* expression contains parameters, else we rescan. XXX maybe we should
* recompute if the function is volatile?
*/
if (node->ss.ps.chgParam != NULL)

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.120 2009/04/02 20:59:10 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.121 2009/06/11 14:48:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,11 +41,11 @@

static void ExecHashIncreaseNumBatches(HashJoinTable hashtable);
static void ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node,
int mcvsToUse);
int mcvsToUse);
static void ExecHashSkewTableInsert(HashJoinTable hashtable,
TupleTableSlot *slot,
uint32 hashvalue,
int bucketNumber);
TupleTableSlot *slot,
uint32 hashvalue,
int bucketNumber);
static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable);

@@ -108,7 +108,7 @@ MultiExecHash(HashState *node)
if (ExecHashGetHashValue(hashtable, econtext, hashkeys, false, false,
&hashvalue))
{
int bucketNumber;
int bucketNumber;

bucketNumber = ExecHashGetSkewBucket(hashtable, hashvalue);
if (bucketNumber != INVALID_SKEW_BUCKET_NO)
@@ -373,7 +373,7 @@ ExecHashTableCreate(Hash *node, List *hashOperators)

/*
* Set up for skew optimization, if possible and there's a need for more
* than one batch. (In a one-batch join, there's no point in it.)
* than one batch. (In a one-batch join, there's no point in it.)
*/
if (nbatch > 1)
ExecHashBuildSkewHash(hashtable, node, num_skew_mcvs);
@@ -446,14 +446,14 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
skew_table_bytes = hash_table_bytes * SKEW_WORK_MEM_PERCENT / 100;

*num_skew_mcvs = skew_table_bytes / (
/* size of a hash tuple */
tupsize +
/* worst-case size of skewBucket[] per MCV */
(8 * sizeof(HashSkewBucket *)) +
/* size of skewBucketNums[] entry */
sizeof(int) +
/* size of skew bucket struct itself */
SKEW_BUCKET_OVERHEAD
/* size of a hash tuple */
tupsize +
/* worst-case size of skewBucket[] per MCV */
(8 * sizeof(HashSkewBucket *)) +
/* size of skewBucketNums[] entry */
sizeof(int) +
/* size of skew bucket struct itself */
SKEW_BUCKET_OVERHEAD
);

if (*num_skew_mcvs > 0)
@@ -983,11 +983,11 @@ ExecReScanHash(HashState *node, ExprContext *exprCtxt)
static void
ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
{
HeapTupleData *statsTuple;
Datum *values;
int nvalues;
float4 *numbers;
int nnumbers;
HeapTupleData *statsTuple;
Datum *values;
int nvalues;
float4 *numbers;
int nnumbers;

/* Do nothing if planner didn't identify the outer relation's join key */
if (!OidIsValid(node->skewTable))
@@ -1040,11 +1040,12 @@ ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
*
* skewBucket[] is an open addressing hashtable with a power of 2 size
* that is greater than the number of MCV values. (This ensures there
* will be at least one null entry, so searches will always terminate.)
* will be at least one null entry, so searches will always
* terminate.)
*
* Note: this code could fail if mcvsToUse exceeds INT_MAX/8, but
* that is not currently possible since we limit pg_statistic entries
* to much less than that.
* Note: this code could fail if mcvsToUse exceeds INT_MAX/8, but that
* is not currently possible since we limit pg_statistic entries to
* much less than that.
*/
nbuckets = 2;
while (nbuckets <= mcvsToUse)
@@ -1056,9 +1057,9 @@ ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
hashtable->skewBucketLen = nbuckets;

/*
* We allocate the bucket memory in the hashtable's batch context.
* It is only needed during the first batch, and this ensures it
* will be automatically removed once the first batch is done.
* We allocate the bucket memory in the hashtable's batch context. It
* is only needed during the first batch, and this ensures it will be
* automatically removed once the first batch is done.
*/
hashtable->skewBucket = (HashSkewBucket **)
MemoryContextAllocZero(hashtable->batchCxt,
@@ -1075,18 +1076,18 @@ ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
/*
* Create a skew bucket for each MCV hash value.
*
* Note: it is very important that we create the buckets in order
* of decreasing MCV frequency. If we have to remove some buckets,
* they must be removed in reverse order of creation (see notes in
* ExecHashRemoveNextSkewBucket) and we want the least common MCVs
* to be removed first.
* Note: it is very important that we create the buckets in order of
* decreasing MCV frequency. If we have to remove some buckets, they
* must be removed in reverse order of creation (see notes in
* ExecHashRemoveNextSkewBucket) and we want the least common MCVs to
* be removed first.
*/
hashfunctions = hashtable->outer_hashfunctions;

for (i = 0; i < mcvsToUse; i++)
{
uint32 hashvalue;
int bucket;
uint32 hashvalue;
int bucket;

hashvalue = DatumGetUInt32(FunctionCall1(&hashfunctions[0],
values[i]));
@@ -1094,7 +1095,7 @@ ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
/*
* While we have not hit a hole in the hashtable and have not hit
* the desired bucket, we have collided with some previous hash
* value, so try the next bucket location. NB: this code must
* value, so try the next bucket location. NB: this code must
* match ExecHashGetSkewBucket.
*/
bucket = hashvalue & (nbuckets - 1);
@@ -1103,8 +1104,8 @@ ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
bucket = (bucket + 1) & (nbuckets - 1);

/*
* If we found an existing bucket with the same hashvalue,
* leave it alone. It's okay for two MCVs to share a hashvalue.
* If we found an existing bucket with the same hashvalue, leave
* it alone. It's okay for two MCVs to share a hashvalue.
*/
if (hashtable->skewBucket[bucket] != NULL)
continue;
@@ -1141,8 +1142,8 @@ ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
int bucket;

/*
* Always return INVALID_SKEW_BUCKET_NO if not doing skew optimization
* (in particular, this happens after the initial batch is done).
* Always return INVALID_SKEW_BUCKET_NO if not doing skew optimization (in
* particular, this happens after the initial batch is done).
*/
if (!hashtable->skewEnabled)
return INVALID_SKEW_BUCKET_NO;
@@ -1154,8 +1155,8 @@ ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)

/*
* While we have not hit a hole in the hashtable and have not hit the
* desired bucket, we have collided with some other hash value, so try
* the next bucket location.
* desired bucket, we have collided with some other hash value, so try the
* next bucket location.
*/
while (hashtable->skewBucket[bucket] != NULL &&
hashtable->skewBucket[bucket]->hashvalue != hashvalue)
@@ -1222,11 +1223,11 @@ ExecHashSkewTableInsert(HashJoinTable hashtable,
static void
ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
{
int bucketToRemove;
int bucketToRemove;
HashSkewBucket *bucket;
uint32 hashvalue;
int bucketno;
int batchno;
uint32 hashvalue;
int bucketno;
int batchno;
HashJoinTuple hashTuple;

/* Locate the bucket to remove */
@@ -1236,8 +1237,8 @@ ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
/*
* Calculate which bucket and batch the tuples belong to in the main
* hashtable. They all have the same hash value, so it's the same for all
* of them. Also note that it's not possible for nbatch to increase
* while we are processing the tuples.
* of them. Also note that it's not possible for nbatch to increase while
* we are processing the tuples.
*/
hashvalue = bucket->hashvalue;
ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
@@ -1248,7 +1249,7 @@ ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
{
HashJoinTuple nextHashTuple = hashTuple->next;
MinimalTuple tuple;
Size tupleSize;
Size tupleSize;

/*
* This code must agree with ExecHashTableInsert. We do not use
@@ -1286,12 +1287,12 @@ ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
*
* NOTE: this is not nearly as simple as it looks on the surface, because
* of the possibility of collisions in the hashtable. Suppose that hash
* values A and B collide at a particular hashtable entry, and that A
* was entered first so B gets shifted to a different table entry. If
* we were to remove A first then ExecHashGetSkewBucket would mistakenly
* start reporting that B is not in the hashtable, because it would hit
* the NULL before finding B. However, we always remove entries in the
* reverse order of creation, so this failure cannot happen.
* values A and B collide at a particular hashtable entry, and that A was
* entered first so B gets shifted to a different table entry. If we were
* to remove A first then ExecHashGetSkewBucket would mistakenly start
* reporting that B is not in the hashtable, because it would hit the NULL
* before finding B. However, we always remove entries in the reverse
* order of creation, so this failure cannot happen.
*/
hashtable->skewBucket[bucketToRemove] = NULL;
hashtable->nSkewBuckets--;

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.100 2009/04/02 20:59:10 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.101 2009/06/11 14:48:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -23,7 +23,7 @@

/* Returns true for JOIN_LEFT and JOIN_ANTI jointypes */
#define HASHJOIN_IS_OUTER(hjstate) ((hjstate)->hj_NullInnerTupleSlot != NULL)
#define HASHJOIN_IS_OUTER(hjstate) ((hjstate)->hj_NullInnerTupleSlot != NULL)

static TupleTableSlot *ExecHashJoinOuterGetTuple(PlanState *outerNode,
HashJoinState *hjstate,
@@ -210,8 +210,8 @@ ExecHashJoin(HashJoinState *node)

/*
* Now we've got an outer tuple and the corresponding hash bucket,
* but it might not belong to the current batch, or it might
* match a skew bucket.
* but it might not belong to the current batch, or it might match
* a skew bucket.
*/
if (batchno != hashtable->curbatch &&
node->hj_CurSkewBucketNo == INVALID_SKEW_BUCKET_NO)
@@ -656,13 +656,13 @@ start_over:
BufFileClose(hashtable->outerBatchFile[curbatch]);
hashtable->outerBatchFile[curbatch] = NULL;
}
else /* we just finished the first batch */
else /* we just finished the first batch */
{
/*
* Reset some of the skew optimization state variables, since we
* no longer need to consider skew tuples after the first batch.
* The memory context reset we are about to do will release the
* skew hashtable itself.
* Reset some of the skew optimization state variables, since we no
* longer need to consider skew tuples after the first batch. The
* memory context reset we are about to do will release the skew
* hashtable itself.
*/
hashtable->skewEnabled = false;
hashtable->skewBucket = NULL;

@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.131 2009/01/01 17:23:41 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.132 2009/06/11 14:48:57 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -367,8 +367,8 @@ ExecIndexAdvanceArrayKeys(IndexArrayKeyInfo *arrayKeys, int numArrayKeys)
|
||||
/*
|
||||
* Note we advance the rightmost array key most quickly, since it will
|
||||
* correspond to the lowest-order index column among the available
|
||||
* qualifications. This is hypothesized to result in better locality
|
||||
* of access in the index.
|
||||
* qualifications. This is hypothesized to result in better locality of
|
||||
* access in the index.
|
||||
*/
|
||||
for (j = numArrayKeys - 1; j >= 0; j--)
|
||||
{
|
||||
@@ -716,8 +716,8 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index, Index scanrelid,
|
||||
extra_scan_keys = n_scan_keys;
|
||||
|
||||
/*
|
||||
* for each opclause in the given qual, convert the opclause into
|
||||
* a single scan key
|
||||
* for each opclause in the given qual, convert the opclause into a single
|
||||
* scan key
|
||||
*/
|
||||
j = 0;
|
||||
foreach(qual_cell, quals)
|
||||
@@ -727,8 +727,8 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index, Index scanrelid,
|
||||
Oid opno; /* operator's OID */
|
||||
RegProcedure opfuncid; /* operator proc id used in scan */
|
||||
Oid opfamily; /* opfamily of index column */
|
||||
int op_strategy; /* operator's strategy number */
|
||||
Oid op_lefttype; /* operator's declared input types */
|
||||
int op_strategy; /* operator's strategy number */
|
||||
Oid op_lefttype; /* operator's declared input types */
|
||||
Oid op_righttype;
|
||||
Expr *leftop; /* expr on lhs of operator */
|
||||
Expr *rightop; /* expr on rhs ... */
|
||||
@@ -805,8 +805,8 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index, Index scanrelid,
|
||||
ScanKeyEntryInitialize(this_scan_key,
|
||||
flags,
|
||||
varattno, /* attribute number to scan */
|
||||
op_strategy, /* op's strategy */
|
||||
op_righttype, /* strategy subtype */
|
||||
op_strategy, /* op's strategy */
|
||||
op_righttype, /* strategy subtype */
|
||||
opfuncid, /* reg proc to use */
|
||||
scanvalue); /* constant */
|
||||
}
|
||||
@@ -983,8 +983,8 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index, Index scanrelid,
|
||||
ScanKeyEntryInitialize(this_scan_key,
|
||||
0, /* flags */
|
||||
varattno, /* attribute number to scan */
|
||||
op_strategy, /* op's strategy */
|
||||
op_righttype, /* strategy subtype */
|
||||
op_strategy, /* op's strategy */
|
||||
op_righttype, /* strategy subtype */
|
||||
opfuncid, /* reg proc to use */
|
||||
(Datum) 0); /* constant */
|
||||
}
|
||||
@@ -1015,7 +1015,7 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index, Index scanrelid,
|
||||
ScanKeyEntryInitialize(this_scan_key,
|
||||
SK_ISNULL | SK_SEARCHNULL,
|
||||
varattno, /* attribute number to scan */
|
||||
InvalidStrategy, /* no strategy */
|
||||
InvalidStrategy, /* no strategy */
|
||||
InvalidOid, /* no strategy subtype */
|
||||
InvalidOid, /* no reg proc for this */
|
||||
(Datum) 0); /* constant */
|
||||
|
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeLimit.c,v 1.38 2009/04/02 20:59:10 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeLimit.c,v 1.39 2009/06/11 14:48:57 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -247,8 +247,8 @@ recompute_limits(LimitState *node)
|
||||
node->offset = DatumGetInt64(val);
|
||||
if (node->offset < 0)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE),
|
||||
errmsg("OFFSET must not be negative")));
|
||||
(errcode(ERRCODE_INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE),
|
||||
errmsg("OFFSET must not be negative")));
|
||||
}
|
||||
}
|
||||
else
|
||||
|
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeMaterial.c,v 1.68 2009/04/02 20:59:10 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeMaterial.c,v 1.69 2009/06/11 14:48:57 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -63,10 +63,10 @@ ExecMaterial(MaterialState *node)
|
||||
if (node->eflags & EXEC_FLAG_MARK)
|
||||
{
|
||||
/*
|
||||
* Allocate a second read pointer to serve as the mark.
|
||||
* We know it must have index 1, so needn't store that.
|
||||
* Allocate a second read pointer to serve as the mark. We know it
|
||||
* must have index 1, so needn't store that.
|
||||
*/
|
||||
int ptrno;
|
||||
int ptrno;
|
||||
|
||||
ptrno = tuplestore_alloc_read_pointer(tuplestorestate,
|
||||
node->eflags);
|
||||
@@ -185,7 +185,7 @@ ExecInitMaterial(Material *node, EState *estate, int eflags)
|
||||
/*
|
||||
* Tuplestore's interpretation of the flag bits is subtly different from
|
||||
* the general executor meaning: it doesn't think BACKWARD necessarily
|
||||
* means "backwards all the way to start". If told to support BACKWARD we
|
||||
* means "backwards all the way to start". If told to support BACKWARD we
|
||||
* must include REWIND in the tuplestore eflags, else tuplestore_trim
|
||||
* might throw away too much.
|
||||
*/
|
||||
|
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.96 2009/04/02 20:59:10 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.97 2009/06/11 14:48:57 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -774,8 +774,8 @@ ExecMergeJoin(MergeJoinState *node)
|
||||
}
|
||||
|
||||
/*
|
||||
* In a semijoin, we'll consider returning the first match,
|
||||
* but after that we're done with this outer tuple.
|
||||
* In a semijoin, we'll consider returning the first
|
||||
* match, but after that we're done with this outer tuple.
|
||||
*/
|
||||
if (node->js.jointype == JOIN_SEMI)
|
||||
node->mj_JoinState = EXEC_MJ_NEXTOUTER;
|
||||
|
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeNestloop.c,v 1.52 2009/04/02 20:59:10 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeNestloop.c,v 1.53 2009/06/11 14:48:57 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -225,8 +225,8 @@ ExecNestLoop(NestLoopState *node)
|
||||
}
|
||||
|
||||
/*
|
||||
* In a semijoin, we'll consider returning the first match,
|
||||
* but after that we're done with this outer tuple.
|
||||
* In a semijoin, we'll consider returning the first match, but
|
||||
* after that we're done with this outer tuple.
|
||||
*/
|
||||
if (node->js.jointype == JOIN_SEMI)
|
||||
node->nl_NeedNewOuter = true;
|
||||
|
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeRecursiveunion.c,v 1.3 2009/01/01 17:23:42 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeRecursiveunion.c,v 1.4 2009/06/11 14:48:57 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -38,7 +38,7 @@ typedef struct RUHashEntryData
|
||||
static void
|
||||
build_hash_table(RecursiveUnionState *rustate)
|
||||
{
|
||||
RecursiveUnion *node = (RecursiveUnion *) rustate->ps.plan;
|
||||
RecursiveUnion *node = (RecursiveUnion *) rustate->ps.plan;
|
||||
|
||||
Assert(node->numCols > 0);
|
||||
Assert(node->numGroups > 0);
|
||||
@@ -58,7 +58,7 @@ build_hash_table(RecursiveUnionState *rustate)
|
||||
* ExecRecursiveUnion(node)
|
||||
*
|
||||
* Scans the recursive query sequentially and returns the next
|
||||
* qualifying tuple.
|
||||
* qualifying tuple.
|
||||
*
|
||||
* 1. evaluate non recursive term and assign the result to RT
|
||||
*
|
||||
@@ -151,8 +151,8 @@ ExecRecursiveUnion(RecursiveUnionState *node)
|
||||
}
|
||||
|
||||
/* Else, tuple is good; stash it in intermediate table ... */
|
||||
node->intermediate_empty = false;
|
||||
tuplestore_puttupleslot(node->intermediate_table, slot);
|
||||
node->intermediate_empty = false;
|
||||
tuplestore_puttupleslot(node->intermediate_table, slot);
|
||||
/* ... and return it */
|
||||
return slot;
|
||||
}
|
||||
@@ -240,8 +240,8 @@ ExecInitRecursiveUnion(RecursiveUnion *node, EState *estate, int eflags)
|
||||
ExecInitResultTupleSlot(estate, &rustate->ps);
|
||||
|
||||
/*
|
||||
* Initialize result tuple type and projection info. (Note: we have
|
||||
* to set up the result type before initializing child nodes, because
|
||||
* Initialize result tuple type and projection info. (Note: we have to
|
||||
* set up the result type before initializing child nodes, because
|
||||
* nodeWorktablescan.c expects it to be valid.)
|
||||
*/
|
||||
ExecAssignResultTypeFromTL(&rustate->ps);
|
||||
@@ -254,8 +254,8 @@ ExecInitRecursiveUnion(RecursiveUnion *node, EState *estate, int eflags)
|
||||
innerPlanState(rustate) = ExecInitNode(innerPlan(node), estate, eflags);
|
||||
|
||||
/*
|
||||
* If hashing, precompute fmgr lookup data for inner loop, and create
|
||||
* the hash table.
|
||||
* If hashing, precompute fmgr lookup data for inner loop, and create the
|
||||
* hash table.
|
||||
*/
|
||||
if (node->numCols > 0)
|
||||
{
|
||||
@@ -322,15 +322,15 @@ ExecRecursiveUnionReScan(RecursiveUnionState *node, ExprContext *exprCtxt)
|
||||
RecursiveUnion *plan = (RecursiveUnion *) node->ps.plan;
|
||||
|
||||
/*
|
||||
* Set recursive term's chgParam to tell it that we'll modify the
|
||||
* working table and therefore it has to rescan.
|
||||
* Set recursive term's chgParam to tell it that we'll modify the working
|
||||
* table and therefore it has to rescan.
|
||||
*/
|
||||
innerPlan->chgParam = bms_add_member(innerPlan->chgParam, plan->wtParam);
|
||||
|
||||
/*
|
||||
* if chgParam of subnode is not null then plan will be re-scanned by
|
||||
* first ExecProcNode. Because of above, we only have to do this to
|
||||
* the non-recursive term.
|
||||
* first ExecProcNode. Because of above, we only have to do this to the
|
||||
* non-recursive term.
|
||||
*/
|
||||
if (outerPlan->chgParam == NULL)
|
||||
ExecReScan(outerPlan, exprCtxt);
|
||||
|
@@ -5,7 +5,7 @@
|
||||
*
|
||||
* The input of a SetOp node consists of tuples from two relations,
|
||||
* which have been combined into one dataset, with a junk attribute added
|
||||
* that shows which relation each tuple came from. In SETOP_SORTED mode,
|
||||
* that shows which relation each tuple came from. In SETOP_SORTED mode,
|
||||
* the input has furthermore been sorted according to all the grouping
|
||||
* columns (ie, all the non-junk attributes). The SetOp node scans each
|
||||
* group of identical tuples to determine how many came from each input
|
||||
@@ -18,7 +18,7 @@
|
||||
* relation is the left-hand one for EXCEPT, and tries to make the smaller
|
||||
* input relation come first for INTERSECT. We build a hash table in memory
|
||||
* with one entry for each group of identical tuples, and count the number of
|
||||
* tuples in the group from each relation. After seeing all the input, we
|
||||
* tuples in the group from each relation. After seeing all the input, we
|
||||
* scan the hashtable and generate the correct output using those counts.
|
||||
* We can avoid making hashtable entries for any tuples appearing only in the
|
||||
* second input relation, since they cannot result in any output.
|
||||
@@ -37,7 +37,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeSetOp.c,v 1.30 2009/04/02 20:59:10 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeSetOp.c,v 1.31 2009/06/11 14:48:57 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -266,15 +266,15 @@ setop_retrieve_direct(SetOpState *setopstate)
|
||||
}
|
||||
|
||||
/*
|
||||
* Store the copied first input tuple in the tuple table slot
|
||||
* reserved for it. The tuple will be deleted when it is cleared
|
||||
* from the slot.
|
||||
* Store the copied first input tuple in the tuple table slot reserved
|
||||
* for it. The tuple will be deleted when it is cleared from the
|
||||
* slot.
|
||||
*/
|
||||
ExecStoreTuple(setopstate->grp_firstTuple,
|
||||
resultTupleSlot,
|
||||
InvalidBuffer,
|
||||
true);
|
||||
setopstate->grp_firstTuple = NULL; /* don't keep two pointers */
|
||||
setopstate->grp_firstTuple = NULL; /* don't keep two pointers */
|
||||
|
||||
/* Initialize working state for a new input tuple group */
|
||||
initialize_counts(pergroup);
|
||||
@@ -318,8 +318,8 @@ setop_retrieve_direct(SetOpState *setopstate)
|
||||
}
|
||||
|
||||
/*
|
||||
* Done scanning input tuple group. See if we should emit any
|
||||
* copies of result tuple, and if so return the first copy.
|
||||
* Done scanning input tuple group. See if we should emit any copies
|
||||
* of result tuple, and if so return the first copy.
|
||||
*/
|
||||
set_output_count(setopstate, pergroup);
|
||||
|
||||
@@ -533,8 +533,8 @@ ExecInitSetOp(SetOp *node, EState *estate, int eflags)
|
||||
/*
|
||||
* initialize child nodes
|
||||
*
|
||||
* If we are hashing then the child plan does not need
|
||||
* to handle REWIND efficiently; see ExecReScanSetOp.
|
||||
* If we are hashing then the child plan does not need to handle REWIND
|
||||
* efficiently; see ExecReScanSetOp.
|
||||
*/
|
||||
if (node->strategy == SETOP_HASHED)
|
||||
eflags &= ~EXEC_FLAG_REWIND;
|
||||
|
@@ -7,7 +7,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.98 2009/04/02 20:59:10 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.99 2009/06/11 14:48:57 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -34,9 +34,9 @@ static Datum ExecSubPlan(SubPlanState *node,
|
||||
bool *isNull,
|
||||
ExprDoneCond *isDone);
|
||||
static Datum ExecAlternativeSubPlan(AlternativeSubPlanState *node,
|
||||
ExprContext *econtext,
|
||||
bool *isNull,
|
||||
ExprDoneCond *isDone);
|
||||
ExprContext *econtext,
|
||||
bool *isNull,
|
||||
ExprDoneCond *isDone);
|
||||
static Datum ExecHashSubPlan(SubPlanState *node,
|
||||
ExprContext *econtext,
|
||||
bool *isNull);
|
||||
@@ -1073,8 +1073,8 @@ ExecReScanSetParamPlan(SubPlanState *node, PlanState *parent)
|
||||
*
|
||||
* CTE subplans are never executed via parameter recalculation; instead
|
||||
* they get run when called by nodeCtescan.c. So don't mark the output
|
||||
* parameter of a CTE subplan as dirty, but do set the chgParam bit
|
||||
* for it so that dependent plan nodes will get told to rescan.
|
||||
* parameter of a CTE subplan as dirty, but do set the chgParam bit for it
|
||||
* so that dependent plan nodes will get told to rescan.
|
||||
*/
|
||||
foreach(l, subplan->setParam)
|
||||
{
|
||||
@@ -1099,8 +1099,8 @@ ExecInitAlternativeSubPlan(AlternativeSubPlan *asplan, PlanState *parent)
|
||||
{
|
||||
AlternativeSubPlanState *asstate = makeNode(AlternativeSubPlanState);
|
||||
double num_calls;
|
||||
SubPlan *subplan1;
|
||||
SubPlan *subplan2;
|
||||
SubPlan *subplan1;
|
||||
SubPlan *subplan2;
|
||||
Cost cost1;
|
||||
Cost cost2;
|
||||
|
||||
@@ -1108,18 +1108,18 @@ ExecInitAlternativeSubPlan(AlternativeSubPlan *asplan, PlanState *parent)
|
||||
asstate->xprstate.expr = (Expr *) asplan;
|
||||
|
||||
/*
|
||||
* Initialize subplans. (Can we get away with only initializing the
|
||||
* one we're going to use?)
|
||||
* Initialize subplans. (Can we get away with only initializing the one
|
||||
* we're going to use?)
|
||||
*/
|
||||
asstate->subplans = (List *) ExecInitExpr((Expr *) asplan->subplans,
|
||||
parent);
|
||||
|
||||
/*
|
||||
* Select the one to be used. For this, we need an estimate of the
|
||||
* number of executions of the subplan. We use the number of output
|
||||
* rows expected from the parent plan node. This is a good estimate
|
||||
* if we are in the parent's targetlist, and an underestimate (but
|
||||
* probably not by more than a factor of 2) if we are in the qual.
|
||||
* Select the one to be used. For this, we need an estimate of the number
|
||||
* of executions of the subplan. We use the number of output rows
|
||||
* expected from the parent plan node. This is a good estimate if we are
|
||||
* in the parent's targetlist, and an underestimate (but probably not by
|
||||
* more than a factor of 2) if we are in the qual.
|
||||
*/
|
||||
num_calls = parent->plan->plan_rows;
|
||||
|
||||
@@ -1157,8 +1157,8 @@ ExecAlternativeSubPlan(AlternativeSubPlanState *node,
|
||||
ExprDoneCond *isDone)
|
||||
{
|
||||
/* Just pass control to the active subplan */
|
||||
SubPlanState *activesp = (SubPlanState *) list_nth(node->subplans,
|
||||
node->active);
|
||||
SubPlanState *activesp = (SubPlanState *) list_nth(node->subplans,
|
||||
node->active);
|
||||
|
||||
Assert(IsA(activesp, SubPlanState));
|
||||
|
||||
|
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeTidscan.c,v 1.61 2009/01/01 17:23:42 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeTidscan.c,v 1.62 2009/06/11 14:48:57 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -63,10 +63,10 @@ TidListCreate(TidScanState *tidstate)
|
||||
ListCell *l;
|
||||
|
||||
/*
|
||||
* We silently discard any TIDs that are out of range at the time of
|
||||
* scan start. (Since we hold at least AccessShareLock on the table,
|
||||
* it won't be possible for someone to truncate away the blocks we
|
||||
* intend to visit.)
|
||||
* We silently discard any TIDs that are out of range at the time of scan
|
||||
* start. (Since we hold at least AccessShareLock on the table, it won't
|
||||
* be possible for someone to truncate away the blocks we intend to
|
||||
* visit.)
|
||||
*/
|
||||
nblocks = RelationGetNumberOfBlocks(tidstate->ss.ss_currentRelation);
|
||||
|
||||
|
@@ -4,7 +4,7 @@
|
||||
* Routines to handle unique'ing of queries where appropriate
|
||||
*
|
||||
* Unique is a very simple node type that just filters out duplicate
|
||||
* tuples from a stream of sorted tuples from its subplan. It's essentially
|
||||
* tuples from a stream of sorted tuples from its subplan. It's essentially
|
||||
* a dumbed-down form of Group: the duplicate-removal functionality is
|
||||
* identical. However, Unique doesn't do projection nor qual checking,
|
||||
* so it's marginally more efficient for cases where neither is needed.
|
||||
@@ -16,7 +16,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.60 2009/04/02 20:59:10 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.61 2009/06/11 14:48:57 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -58,8 +58,8 @@ ExecUnique(UniqueState *node)
|
||||
|
||||
/*
|
||||
* now loop, returning only non-duplicate tuples. We assume that the
|
||||
* tuples arrive in sorted order so we can detect duplicates easily.
|
||||
* The first tuple of each group is returned.
|
||||
* tuples arrive in sorted order so we can detect duplicates easily. The
|
||||
* first tuple of each group is returned.
|
||||
*/
|
||||
for (;;)
|
||||
{
|
||||
|
@@ -4,7 +4,7 @@
|
||||
* routines to handle WindowAgg nodes.
|
||||
*
|
||||
* A WindowAgg node evaluates "window functions" across suitable partitions
|
||||
* of the input tuple set. Any one WindowAgg works for just a single window
|
||||
* of the input tuple set. Any one WindowAgg works for just a single window
|
||||
* specification, though it can evaluate multiple window functions sharing
|
||||
* identical window specifications. The input tuples are required to be
|
||||
* delivered in sorted order, with the PARTITION BY columns (if any) as
|
||||
@@ -14,7 +14,7 @@
|
||||
*
|
||||
* Since window functions can require access to any or all of the rows in
|
||||
* the current partition, we accumulate rows of the partition into a
|
||||
* tuplestore. The window functions are called using the WindowObject API
|
||||
* tuplestore. The window functions are called using the WindowObject API
|
||||
* so that they can access those rows as needed.
|
||||
*
|
||||
* We also support using plain aggregate functions as window functions.
|
||||
@@ -27,7 +27,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeWindowAgg.c,v 1.4 2009/03/27 18:30:21 tgl Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeWindowAgg.c,v 1.5 2009/06/11 14:48:57 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -75,15 +75,15 @@ typedef struct WindowStatePerFuncData
|
||||
{
|
||||
/* Links to WindowFunc expr and state nodes this working state is for */
|
||||
WindowFuncExprState *wfuncstate;
|
||||
WindowFunc *wfunc;
|
||||
WindowFunc *wfunc;
|
||||
|
||||
int numArguments; /* number of arguments */
|
||||
|
||||
FmgrInfo flinfo; /* fmgr lookup data for window function */
|
||||
|
||||
/*
|
||||
* We need the len and byval info for the result of each function
|
||||
* in order to know how to copy/delete values.
|
||||
* We need the len and byval info for the result of each function in order
|
||||
* to know how to copy/delete values.
|
||||
*/
|
||||
int16 resulttypeLen;
|
||||
bool resulttypeByVal;
|
||||
@@ -91,7 +91,7 @@ typedef struct WindowStatePerFuncData
|
||||
bool plain_agg; /* is it just a plain aggregate function? */
|
||||
int aggno; /* if so, index of its PerAggData */
|
||||
|
||||
WindowObject winobj; /* object used in window function API */
|
||||
WindowObject winobj; /* object used in window function API */
|
||||
} WindowStatePerFuncData;
|
||||
|
||||
/*
|
||||
@@ -144,38 +144,38 @@ typedef struct WindowStatePerAggData
|
||||
} WindowStatePerAggData;
|
||||
|
||||
static void initialize_windowaggregate(WindowAggState *winstate,
|
||||
WindowStatePerFunc perfuncstate,
|
||||
WindowStatePerAgg peraggstate);
|
||||
WindowStatePerFunc perfuncstate,
|
||||
WindowStatePerAgg peraggstate);
|
||||
static void advance_windowaggregate(WindowAggState *winstate,
|
||||
WindowStatePerFunc perfuncstate,
|
||||
WindowStatePerAgg peraggstate);
|
||||
WindowStatePerFunc perfuncstate,
|
||||
WindowStatePerAgg peraggstate);
|
||||
static void finalize_windowaggregate(WindowAggState *winstate,
|
||||
WindowStatePerFunc perfuncstate,
|
||||
WindowStatePerAgg peraggstate,
|
||||
Datum *result, bool *isnull);
|
||||
WindowStatePerFunc perfuncstate,
|
||||
WindowStatePerAgg peraggstate,
|
||||
Datum *result, bool *isnull);
|
||||
|
||||
static void eval_windowaggregates(WindowAggState *winstate);
|
||||
static void eval_windowfunction(WindowAggState *winstate,
|
||||
WindowStatePerFunc perfuncstate,
|
||||
Datum *result, bool *isnull);
|
||||
WindowStatePerFunc perfuncstate,
|
||||
Datum *result, bool *isnull);
|
||||
|
||||
static void begin_partition(WindowAggState *winstate);
|
||||
static void spool_tuples(WindowAggState *winstate, int64 pos);
|
||||
static void release_partition(WindowAggState *winstate);
|
||||
|
||||
static bool row_is_in_frame(WindowAggState *winstate, int64 pos,
|
||||
TupleTableSlot *slot);
|
||||
TupleTableSlot *slot);
|
||||
static void update_frametailpos(WindowObject winobj, TupleTableSlot *slot);
|
||||
|
||||
static WindowStatePerAggData *initialize_peragg(WindowAggState *winstate,
|
||||
WindowFunc *wfunc,
|
||||
WindowStatePerAgg peraggstate);
|
||||
WindowFunc *wfunc,
|
||||
WindowStatePerAgg peraggstate);
|
||||
static Datum GetAggInitVal(Datum textInitVal, Oid transtype);
|
||||
|
||||
static bool are_peers(WindowAggState *winstate, TupleTableSlot *slot1,
|
||||
TupleTableSlot *slot2);
|
||||
TupleTableSlot *slot2);
|
||||
static bool window_gettupleslot(WindowObject winobj, int64 pos,
|
||||
TupleTableSlot *slot);
|
||||
TupleTableSlot *slot);
|
||||
|
||||
|
||||
/*
|
||||
@@ -187,7 +187,7 @@ initialize_windowaggregate(WindowAggState *winstate,
|
||||
WindowStatePerFunc perfuncstate,
|
||||
WindowStatePerAgg peraggstate)
|
||||
{
|
||||
MemoryContext oldContext;
|
||||
MemoryContext oldContext;
|
||||
|
||||
if (peraggstate->initValueIsNull)
|
||||
peraggstate->transValue = peraggstate->initValue;
|
||||
@@ -213,14 +213,14 @@ advance_windowaggregate(WindowAggState *winstate,
|
||||
WindowStatePerFunc perfuncstate,
|
||||
WindowStatePerAgg peraggstate)
|
||||
{
|
||||
WindowFuncExprState *wfuncstate = perfuncstate->wfuncstate;
|
||||
int numArguments = perfuncstate->numArguments;
|
||||
FunctionCallInfoData fcinfodata;
|
||||
FunctionCallInfo fcinfo = &fcinfodata;
|
||||
Datum newVal;
|
||||
ListCell *arg;
|
||||
int i;
|
||||
MemoryContext oldContext;
|
||||
WindowFuncExprState *wfuncstate = perfuncstate->wfuncstate;
|
||||
int numArguments = perfuncstate->numArguments;
|
||||
FunctionCallInfoData fcinfodata;
|
||||
FunctionCallInfo fcinfo = &fcinfodata;
|
||||
Datum newVal;
|
||||
ListCell *arg;
|
||||
int i;
|
||||
MemoryContext oldContext;
|
||||
ExprContext *econtext = winstate->tmpcontext;
|
||||
|
||||
oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
|
||||
@@ -229,7 +229,7 @@ advance_windowaggregate(WindowAggState *winstate,
|
||||
i = 1;
|
||||
foreach(arg, wfuncstate->args)
|
||||
{
|
||||
ExprState *argstate = (ExprState *) lfirst(arg);
|
||||
ExprState *argstate = (ExprState *) lfirst(arg);
|
||||
|
||||
fcinfo->arg[i] = ExecEvalExpr(argstate, econtext,
|
||||
&fcinfo->argnull[i], NULL);
|
||||
@@ -263,8 +263,8 @@ advance_windowaggregate(WindowAggState *winstate,
|
||||
*/
|
||||
MemoryContextSwitchTo(winstate->wincontext);
|
||||
peraggstate->transValue = datumCopy(fcinfo->arg[1],
|
||||
peraggstate->transtypeByVal,
|
||||
peraggstate->transtypeLen);
|
||||
peraggstate->transtypeByVal,
|
||||
peraggstate->transtypeLen);
|
||||
peraggstate->transValueIsNull = false;
|
||||
peraggstate->noTransValue = false;
|
||||
MemoryContextSwitchTo(oldContext);
|
||||
@@ -327,7 +327,7 @@ finalize_windowaggregate(WindowAggState *winstate,
|
||||
WindowStatePerAgg peraggstate,
|
||||
Datum *result, bool *isnull)
|
||||
{
|
||||
MemoryContext oldContext;
|
||||
MemoryContext oldContext;
|
||||
|
||||
oldContext = MemoryContextSwitchTo(winstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory);
|
||||
|
||||
@@ -336,7 +336,7 @@ finalize_windowaggregate(WindowAggState *winstate,
|
||||
*/
|
||||
if (OidIsValid(peraggstate->finalfn_oid))
|
||||
{
|
||||
FunctionCallInfoData fcinfo;
|
||||
FunctionCallInfoData fcinfo;
|
||||
|
||||
InitFunctionCallInfoData(fcinfo, &(peraggstate->finalfn), 1,
|
||||
(void *) winstate, NULL);
|
||||
@@ -384,12 +384,13 @@ finalize_windowaggregate(WindowAggState *winstate,
|
||||
static void
|
||||
eval_windowaggregates(WindowAggState *winstate)
|
||||
{
|
||||
WindowStatePerAgg peraggstate;
|
||||
int wfuncno, numaggs;
|
||||
int i;
|
||||
MemoryContext oldContext;
|
||||
ExprContext *econtext;
|
||||
TupleTableSlot *agg_row_slot;
|
||||
WindowStatePerAgg peraggstate;
|
||||
int wfuncno,
|
||||
numaggs;
|
||||
int i;
|
||||
MemoryContext oldContext;
|
||||
ExprContext *econtext;
|
||||
TupleTableSlot *agg_row_slot;
|
||||
|
||||
numaggs = winstate->numaggs;
|
||||
if (numaggs == 0)
|
||||
@@ -400,44 +401,43 @@ eval_windowaggregates(WindowAggState *winstate)
|
||||
|
||||
/*
|
||||
* Currently, we support only a subset of the SQL-standard window framing
|
||||
* rules. In all the supported cases, the window frame always consists
|
||||
* of a contiguous group of rows extending forward from the start of the
|
||||
* partition, and rows only enter the frame, never exit it, as the
|
||||
* current row advances forward. This makes it possible to use an
|
||||
* incremental strategy for evaluating aggregates: we run the transition
|
||||
* function for each row added to the frame, and run the final function
|
||||
* whenever we need the current aggregate value. This is considerably
|
||||
* more efficient than the naive approach of re-running the entire
|
||||
* aggregate calculation for each current row. It does assume that the
|
||||
* final function doesn't damage the running transition value. (Some
|
||||
* C-coded aggregates do that for efficiency's sake --- but they are
|
||||
* supposed to do so only when their fcinfo->context is an AggState, not
|
||||
* a WindowAggState.)
|
||||
* rules. In all the supported cases, the window frame always consists of
|
||||
* a contiguous group of rows extending forward from the start of the
|
||||
* partition, and rows only enter the frame, never exit it, as the current
|
||||
* row advances forward. This makes it possible to use an incremental
|
||||
* strategy for evaluating aggregates: we run the transition function for
|
||||
* each row added to the frame, and run the final function whenever we
|
||||
* need the current aggregate value. This is considerably more efficient
|
||||
* than the naive approach of re-running the entire aggregate calculation
|
||||
* for each current row. It does assume that the final function doesn't
|
||||
* damage the running transition value. (Some C-coded aggregates do that
|
||||
* for efficiency's sake --- but they are supposed to do so only when
|
||||
* their fcinfo->context is an AggState, not a WindowAggState.)
|
||||
*
|
||||
* In many common cases, multiple rows share the same frame and hence
|
||||
* the same aggregate value. (In particular, if there's no ORDER BY in
|
||||
* a RANGE window, then all rows are peers and so they all have window
|
||||
* frame equal to the whole partition.) We optimize such cases by
|
||||
* calculating the aggregate value once when we reach the first row of a
|
||||
* peer group, and then returning the saved value for all subsequent rows.
|
||||
* In many common cases, multiple rows share the same frame and hence the
|
||||
* same aggregate value. (In particular, if there's no ORDER BY in a RANGE
|
||||
* window, then all rows are peers and so they all have window frame equal
|
||||
* to the whole partition.) We optimize such cases by calculating the
|
||||
* aggregate value once when we reach the first row of a peer group, and
|
||||
* then returning the saved value for all subsequent rows.
|
||||
*
|
||||
* 'aggregatedupto' keeps track of the first row that has not yet been
|
||||
* accumulated into the aggregate transition values. Whenever we start a
|
||||
* new peer group, we accumulate forward to the end of the peer group.
|
||||
*
|
||||
* TODO: In the future, we should implement the full SQL-standard set
|
||||
* of framing rules. We could implement the other cases by recalculating
|
||||
* the aggregates whenever a row exits the frame. That would be pretty
|
||||
* slow, though. For aggregates like SUM and COUNT we could implement a
|
||||
* TODO: In the future, we should implement the full SQL-standard set of
|
||||
* framing rules. We could implement the other cases by recalculating the
|
||||
* aggregates whenever a row exits the frame. That would be pretty slow,
|
||||
* though. For aggregates like SUM and COUNT we could implement a
|
||||
* "negative transition function" that would be called for each row as it
|
||||
* exits the frame. We'd have to think about avoiding recalculation of
|
||||
* volatile arguments of aggregate functions, too.
|
||||
*/
|
||||
|
||||
/*
|
||||
* If we've already aggregated up through current row, reuse the
|
||||
* saved result values. NOTE: this test works for the currently
|
||||
* supported framing rules, but will need fixing when more are added.
|
||||
* If we've already aggregated up through current row, reuse the saved
|
||||
* result values. NOTE: this test works for the currently supported
|
||||
* framing rules, but will need fixing when more are added.
|
||||
*/
|
||||
if (winstate->aggregatedupto > winstate->currentpos)
|
||||
{
|
||||
@@ -467,9 +467,9 @@ eval_windowaggregates(WindowAggState *winstate)
|
||||
/*
|
||||
* Advance until we reach a row not in frame (or end of partition).
|
||||
*
|
||||
* Note the loop invariant: agg_row_slot is either empty or holds the
|
||||
* row at position aggregatedupto. The agg_ptr read pointer must always
|
||||
* point to the next row to read into agg_row_slot.
|
||||
* Note the loop invariant: agg_row_slot is either empty or holds the row
|
||||
* at position aggregatedupto. The agg_ptr read pointer must always point
|
||||
* to the next row to read into agg_row_slot.
|
||||
*/
|
||||
agg_row_slot = winstate->agg_row_slot;
|
||||
for (;;)
|
||||
@@ -530,16 +530,16 @@ eval_windowaggregates(WindowAggState *winstate)
|
||||
/*
|
||||
* save the result in case next row shares the same frame.
|
||||
*
|
||||
* XXX in some framing modes, eg ROWS/END_CURRENT_ROW, we can know
|
||||
* in advance that the next row can't possibly share the same frame.
|
||||
* Is it worth detecting that and skipping this code?
|
||||
* XXX in some framing modes, eg ROWS/END_CURRENT_ROW, we can know in
|
||||
* advance that the next row can't possibly share the same frame. Is
|
||||
* it worth detecting that and skipping this code?
|
||||
*/
|
||||
if (!peraggstate->resulttypeByVal)
|
||||
{
|
||||
/*
|
||||
* clear old resultValue in order not to leak memory. (Note:
|
||||
* the new result can't possibly be the same datum as old
|
||||
* resultValue, because we never passed it to the trans function.)
|
||||
* clear old resultValue in order not to leak memory. (Note: the
|
||||
* new result can't possibly be the same datum as old resultValue,
|
||||
* because we never passed it to the trans function.)
|
||||
*/
|
||||
if (!peraggstate->resultValueIsNull)
|
||||
pfree(DatumGetPointer(peraggstate->resultValue));
|
||||
@@ -579,15 +579,15 @@ eval_windowfunction(WindowAggState *winstate, WindowStatePerFunc perfuncstate,
|
||||
Datum *result, bool *isnull)
|
||||
{
|
||||
FunctionCallInfoData fcinfo;
|
||||
MemoryContext oldContext;
|
||||
MemoryContext oldContext;
|
||||
|
||||
oldContext = MemoryContextSwitchTo(winstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory);
|
||||
|
||||
/*
|
||||
* We don't pass any normal arguments to a window function, but we do
|
||||
* pass it the number of arguments, in order to permit window function
|
||||
* implementations to support varying numbers of arguments. The real
|
||||
* info goes through the WindowObject, which is passed via fcinfo->context.
|
||||
* We don't pass any normal arguments to a window function, but we do pass
|
||||
* it the number of arguments, in order to permit window function
|
||||
* implementations to support varying numbers of arguments. The real info
|
||||
* goes through the WindowObject, which is passed via fcinfo->context.
|
||||
*/
|
||||
InitFunctionCallInfoData(fcinfo, &(perfuncstate->flinfo),
|
||||
perfuncstate->numArguments,
|
||||
@@ -599,9 +599,9 @@ eval_windowfunction(WindowAggState *winstate, WindowStatePerFunc perfuncstate,
|
||||
*isnull = fcinfo.isnull;
|
||||
|
||||
/*
|
||||
* Make sure pass-by-ref data is allocated in the appropriate context.
|
||||
* (We need this in case the function returns a pointer into some
|
||||
* short-lived tuple, as is entirely possible.)
|
||||
* Make sure pass-by-ref data is allocated in the appropriate context. (We
|
||||
* need this in case the function returns a pointer into some short-lived
|
||||
* tuple, as is entirely possible.)
|
||||
*/
|
||||
if (!perfuncstate->resulttypeByVal && !fcinfo.isnull &&
|
||||
!MemoryContextContains(CurrentMemoryContext,
|
||||
@@ -620,9 +620,9 @@ eval_windowfunction(WindowAggState *winstate, WindowStatePerFunc perfuncstate,
|
||||
static void
|
||||
begin_partition(WindowAggState *winstate)
|
||||
{
|
||||
PlanState *outerPlan = outerPlanState(winstate);
|
||||
int numfuncs = winstate->numfuncs;
|
||||
int i;
|
||||
PlanState *outerPlan = outerPlanState(winstate);
|
||||
int numfuncs = winstate->numfuncs;
|
||||
int i;
|
||||
|
||||
winstate->partition_spooled = false;
|
||||
winstate->frametail_valid = false;
|
||||
@@ -633,15 +633,15 @@ begin_partition(WindowAggState *winstate)
|
||||
ExecClearTuple(winstate->agg_row_slot);
|
||||
|
||||
/*
|
||||
* If this is the very first partition, we need to fetch the first
|
||||
* input row to store in first_part_slot.
|
||||
* If this is the very first partition, we need to fetch the first input
|
||||
* row to store in first_part_slot.
|
||||
*/
|
||||
if (TupIsNull(winstate->first_part_slot))
|
||||
{
|
||||
TupleTableSlot *outerslot = ExecProcNode(outerPlan);
|
||||
|
||||
if (!TupIsNull(outerslot))
|
||||
ExecCopySlot(winstate->first_part_slot, outerslot);
|
||||
ExecCopySlot(winstate->first_part_slot, outerslot);
|
||||
else
|
||||
{
|
||||
/* outer plan is empty, so we have nothing to do */
|
||||
@@ -671,16 +671,16 @@ begin_partition(WindowAggState *winstate)
|
||||
/* create mark and read pointers for each real window function */
|
||||
for (i = 0; i < numfuncs; i++)
|
||||
{
|
||||
WindowStatePerFunc perfuncstate = &(winstate->perfunc[i]);
|
||||
WindowStatePerFunc perfuncstate = &(winstate->perfunc[i]);
|
||||
|
||||
if (!perfuncstate->plain_agg)
|
||||
{
|
||||
WindowObject winobj = perfuncstate->winobj;
|
||||
WindowObject winobj = perfuncstate->winobj;
|
||||
|
||||
winobj->markptr = tuplestore_alloc_read_pointer(winstate->buffer,
|
||||
0);
|
||||
winobj->readptr = tuplestore_alloc_read_pointer(winstate->buffer,
|
||||
EXEC_FLAG_BACKWARD);
|
||||
EXEC_FLAG_BACKWARD);
|
||||
winobj->markpos = -1;
|
||||
winobj->seekpos = -1;
|
||||
}
|
||||
@@ -701,8 +701,8 @@ begin_partition(WindowAggState *winstate)
|
||||
static void
|
||||
spool_tuples(WindowAggState *winstate, int64 pos)
|
||||
{
|
||||
WindowAgg *node = (WindowAgg *) winstate->ss.ps.plan;
|
||||
PlanState *outerPlan;
|
||||
WindowAgg *node = (WindowAgg *) winstate->ss.ps.plan;
|
||||
PlanState *outerPlan;
|
||||
TupleTableSlot *outerslot;
|
||||
MemoryContext oldcontext;
|
||||
|
||||
@@ -713,7 +713,7 @@ spool_tuples(WindowAggState *winstate, int64 pos)
|
||||
|
||||
/*
|
||||
* If the tuplestore has spilled to disk, alternate reading and writing
|
||||
* becomes quite expensive due to frequent buffer flushes. It's cheaper
|
||||
* becomes quite expensive due to frequent buffer flushes. It's cheaper
|
||||
* to force the entire partition to get spooled in one go.
|
||||
*
|
||||
* XXX this is a horrid kluge --- it'd be better to fix the performance
|
||||
@@ -773,11 +773,11 @@ spool_tuples(WindowAggState *winstate, int64 pos)
|
||||
static void
|
||||
release_partition(WindowAggState *winstate)
|
||||
{
|
||||
int i;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < winstate->numfuncs; i++)
|
||||
{
|
||||
WindowStatePerFunc perfuncstate = &(winstate->perfunc[i]);
|
||||
WindowStatePerFunc perfuncstate = &(winstate->perfunc[i]);
|
||||
|
||||
/* Release any partition-local state of this window function */
|
||||
if (perfuncstate->winobj)
|
||||
@@ -804,7 +804,7 @@ release_partition(WindowAggState *winstate)
|
||||
* to our window framing rule
|
||||
*
|
||||
* The caller must have already determined that the row is in the partition
|
||||
* and fetched it into a slot. This function just encapsulates the framing
|
||||
* and fetched it into a slot. This function just encapsulates the framing
|
||||
* rules.
|
||||
*/
|
||||
static bool
|
||||
@@ -895,8 +895,8 @@ update_frametailpos(WindowObject winobj, TupleTableSlot *slot)
|
||||
}
|
||||
|
||||
/*
|
||||
* Else we have to search for the first non-peer of the current row.
|
||||
* We assume the current value of frametailpos is a lower bound on the
|
||||
* Else we have to search for the first non-peer of the current row. We
|
||||
* assume the current value of frametailpos is a lower bound on the
|
||||
* possible frame tail location, ie, frame tail never goes backward, and
|
||||
* that currentpos is also a lower bound, ie, current row is always in
|
||||
* frame.
|
||||
@@ -929,18 +929,18 @@ TupleTableSlot *
|
||||
ExecWindowAgg(WindowAggState *winstate)
|
||||
{
|
||||
TupleTableSlot *result;
|
||||
ExprDoneCond isDone;
|
||||
ExprContext *econtext;
|
||||
int i;
|
||||
int numfuncs;
|
||||
ExprDoneCond isDone;
|
||||
ExprContext *econtext;
|
||||
int i;
|
||||
int numfuncs;
|
||||
|
||||
if (winstate->all_done)
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* Check to see if we're still projecting out tuples from a previous output
|
||||
* tuple (because there is a function-returning-set in the projection
|
||||
* expressions). If so, try to project another one.
|
||||
* Check to see if we're still projecting out tuples from a previous
|
||||
* output tuple (because there is a function-returning-set in the
|
||||
* projection expressions). If so, try to project another one.
|
||||
*/
|
||||
if (winstate->ss.ps.ps_TupFromTlist)
|
||||
{
|
||||
@@ -1003,8 +1003,8 @@ restart:
|
||||
* Read the current row from the tuplestore, and save in ScanTupleSlot.
|
||||
* (We can't rely on the outerplan's output slot because we may have to
|
||||
* read beyond the current row. Also, we have to actually copy the row
|
||||
* out of the tuplestore, since window function evaluation might cause
|
||||
* the tuplestore to dump its state to disk.)
|
||||
* out of the tuplestore, since window function evaluation might cause the
|
||||
* tuplestore to dump its state to disk.)
|
||||
*
|
||||
* Current row must be in the tuplestore, since we spooled it above.
|
||||
*/
|
||||
@@ -1019,13 +1019,13 @@ restart:
|
||||
numfuncs = winstate->numfuncs;
|
||||
for (i = 0; i < numfuncs; i++)
|
||||
{
|
||||
WindowStatePerFunc perfuncstate = &(winstate->perfunc[i]);
|
||||
WindowStatePerFunc perfuncstate = &(winstate->perfunc[i]);
|
||||
|
||||
if (perfuncstate->plain_agg)
|
||||
continue;
|
||||
eval_windowfunction(winstate, perfuncstate,
|
||||
&(econtext->ecxt_aggvalues[perfuncstate->wfuncstate->wfuncno]),
|
||||
&(econtext->ecxt_aggnulls[perfuncstate->wfuncstate->wfuncno]));
|
||||
&(econtext->ecxt_aggvalues[perfuncstate->wfuncstate->wfuncno]),
|
||||
&(econtext->ecxt_aggnulls[perfuncstate->wfuncstate->wfuncno]));
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1040,9 +1040,9 @@ restart:
|
||||
tuplestore_trim(winstate->buffer);
|
||||
|
||||
/*
|
||||
* Form and return a projection tuple using the windowfunc results
|
||||
* and the current row. Setting ecxt_outertuple arranges that any
|
||||
* Vars will be evaluated with respect to that row.
|
||||
* Form and return a projection tuple using the windowfunc results and the
|
||||
* current row. Setting ecxt_outertuple arranges that any Vars will be
|
||||
* evaluated with respect to that row.
|
||||
*/
|
||||
econtext->ecxt_outertuple = winstate->ss.ss_ScanTupleSlot;
|
||||
result = ExecProject(winstate->ss.ps.ps_ProjInfo, &isDone);
|
||||
@@ -1072,8 +1072,8 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags)
|
||||
Plan *outerPlan;
|
||||
ExprContext *econtext;
|
||||
ExprContext *tmpcontext;
|
||||
WindowStatePerFunc perfunc;
|
||||
WindowStatePerAgg peragg;
|
||||
WindowStatePerFunc perfunc;
|
||||
WindowStatePerAgg peragg;
|
||||
int numfuncs,
|
||||
wfuncno,
|
||||
numaggs,
|
||||
@@ -1163,7 +1163,7 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags)
|
||||
/* Set up data for comparing tuples */
|
||||
if (node->partNumCols > 0)
|
||||
winstate->partEqfunctions = execTuplesMatchPrepare(node->partNumCols,
|
||||
node->partOperators);
|
||||
node->partOperators);
|
||||
if (node->ordNumCols > 0)
|
||||
winstate->ordEqfunctions = execTuplesMatchPrepare(node->ordNumCols,
|
||||
node->ordOperators);
|
||||
@@ -1189,13 +1189,13 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags)
|
||||
aggno = -1;
|
||||
foreach(l, winstate->funcs)
|
||||
{
|
||||
WindowFuncExprState *wfuncstate = (WindowFuncExprState *) lfirst(l);
|
||||
WindowFunc *wfunc = (WindowFunc *) wfuncstate->xprstate.expr;
|
||||
WindowFuncExprState *wfuncstate = (WindowFuncExprState *) lfirst(l);
|
||||
WindowFunc *wfunc = (WindowFunc *) wfuncstate->xprstate.expr;
|
||||
WindowStatePerFunc perfuncstate;
|
||||
AclResult aclresult;
|
||||
int i;
|
||||
|
||||
if (wfunc->winref != node->winref) /* planner screwed up? */
|
||||
if (wfunc->winref != node->winref) /* planner screwed up? */
|
||||
elog(ERROR, "WindowFunc with winref %u assigned to WindowAgg with winref %u",
|
||||
wfunc->winref, node->winref);
|
||||
|
||||
@@ -1239,13 +1239,13 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags)
|
||||
&perfuncstate->resulttypeByVal);
|
||||
|
||||
/*
|
||||
* If it's really just a plain aggregate function,
|
||||
* we'll emulate the Agg environment for it.
|
||||
* If it's really just a plain aggregate function, we'll emulate the
|
||||
* Agg environment for it.
|
||||
*/
|
||||
perfuncstate->plain_agg = wfunc->winagg;
|
||||
if (wfunc->winagg)
|
||||
{
|
||||
WindowStatePerAgg peraggstate;
|
||||
WindowStatePerAgg peraggstate;
|
||||
|
||||
perfuncstate->aggno = ++aggno;
|
||||
peraggstate = &winstate->peragg[aggno];
|
||||
@@ -1325,7 +1325,7 @@ ExecEndWindowAgg(WindowAggState *node)
|
||||
void
|
||||
ExecReScanWindowAgg(WindowAggState *node, ExprContext *exprCtxt)
|
||||
{
|
||||
ExprContext *econtext = node->ss.ps.ps_ExprContext;
|
||||
ExprContext *econtext = node->ss.ps.ps_ExprContext;
|
||||
|
||||
node->all_done = false;
|
||||
|
||||
@@ -1489,11 +1489,10 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc,
|
||||
aggtranstype);
|
||||
|
||||
/*
|
||||
* If the transfn is strict and the initval is NULL, make sure input
|
||||
* type and transtype are the same (or at least binary-compatible), so
|
||||
* that it's OK to use the first input value as the initial
|
||||
* transValue. This should have been checked at agg definition time,
|
||||
* but just in case...
|
||||
* If the transfn is strict and the initval is NULL, make sure input type
|
||||
* and transtype are the same (or at least binary-compatible), so that
|
||||
* it's OK to use the first input value as the initial transValue. This
|
||||
* should have been checked at agg definition time, but just in case...
|
||||
*/
|
||||
if (peraggstate->transfn.fn_strict && peraggstate->initValueIsNull)
|
||||
{
|
||||
@@ -1579,10 +1578,10 @@ window_gettupleslot(WindowObject winobj, int64 pos, TupleTableSlot *slot)
|
||||
tuplestore_select_read_pointer(winstate->buffer, winobj->readptr);
|
||||
|
||||
/*
|
||||
* There's no API to refetch the tuple at the current position. We
|
||||
* have to move one tuple forward, and then one backward. (We don't
|
||||
* do it the other way because we might try to fetch the row before
|
||||
* our mark, which isn't allowed.)
|
||||
* There's no API to refetch the tuple at the current position. We have to
|
||||
* move one tuple forward, and then one backward. (We don't do it the
|
||||
* other way because we might try to fetch the row before our mark, which
|
||||
* isn't allowed.)
|
||||
*/
|
||||
if (winobj->seekpos == pos)
|
||||
{
|
||||
@@ -1623,7 +1622,7 @@ window_gettupleslot(WindowObject winobj, int64 pos, TupleTableSlot *slot)
|
||||
* requested amount of space. Subsequent calls just return the same chunk.
|
||||
*
|
||||
* Memory obtained this way is normally used to hold state that should be
|
||||
* automatically reset for each new partition. If a window function wants
|
||||
* automatically reset for each new partition. If a window function wants
|
||||
* to hold state across the whole query, fcinfo->fn_extra can be used in the
|
||||
* usual way for that.
|
||||
*/
|
||||
@@ -1710,10 +1709,10 @@ bool
|
||||
WinRowsArePeers(WindowObject winobj, int64 pos1, int64 pos2)
|
||||
{
|
||||
WindowAggState *winstate;
|
||||
WindowAgg *node;
|
||||
WindowAgg *node;
|
||||
TupleTableSlot *slot1;
|
||||
TupleTableSlot *slot2;
|
||||
bool res;
|
||||
bool res;
|
||||
|
||||
Assert(WindowObjectIsValid(winobj));
|
||||
winstate = winobj->winstate;
|
||||
@@ -1789,7 +1788,7 @@ WinGetFuncArgInPartition(WindowObject winobj, int argno,
|
||||
break;
|
||||
default:
|
||||
elog(ERROR, "unrecognized window seek type: %d", seektype);
|
||||
abs_pos = 0; /* keep compiler quiet */
|
||||
abs_pos = 0; /* keep compiler quiet */
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -1862,7 +1861,7 @@ WinGetFuncArgInFrame(WindowObject winobj, int argno,
|
||||
break;
|
||||
default:
|
||||
elog(ERROR, "unrecognized window seek type: %d", seektype);
|
||||
abs_pos = 0; /* keep compiler quiet */
|
||||
abs_pos = 0; /* keep compiler quiet */
|
||||
break;
|
||||
}
|
||||
|
||||
|
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeWorktablescan.c,v 1.6 2009/03/27 18:30:21 tgl Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/executor/nodeWorktablescan.c,v 1.7 2009/06/11 14:48:57 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -73,10 +73,10 @@ TupleTableSlot *
|
||||
ExecWorkTableScan(WorkTableScanState *node)
|
||||
{
|
||||
/*
|
||||
* On the first call, find the ancestor RecursiveUnion's state
|
||||
* via the Param slot reserved for it. (We can't do this during node
|
||||
* init because there are corner cases where we'll get the init call
|
||||
* before the RecursiveUnion does.)
|
||||
* On the first call, find the ancestor RecursiveUnion's state via the
|
||||
* Param slot reserved for it. (We can't do this during node init because
|
||||
* there are corner cases where we'll get the init call before the
|
||||
* RecursiveUnion does.)
|
||||
*/
|
||||
if (node->rustate == NULL)
|
||||
{
|
||||
@@ -100,8 +100,8 @@ ExecWorkTableScan(WorkTableScanState *node)
|
||||
ExecGetResultType(&node->rustate->ps));
|
||||
|
||||
/*
|
||||
* Now we can initialize the projection info. This must be
|
||||
* completed before we can call ExecScan().
|
||||
* Now we can initialize the projection info. This must be completed
|
||||
* before we can call ExecScan().
|
||||
*/
|
||||
ExecAssignScanProjectionInfo(&node->ss);
|
||||
}
|
||||
|
@@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.207 2009/01/21 11:02:40 heikki Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.208 2009/06/11 14:48:57 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -45,11 +45,11 @@ static int _SPI_connected = -1;
|
||||
static int _SPI_curid = -1;
|
||||
|
||||
static Portal SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
|
||||
Datum *Values, const char *Nulls,
|
||||
bool read_only, int pflags);
|
||||
Datum *Values, const char *Nulls,
|
||||
bool read_only, int pflags);
|
||||
|
||||
static void _SPI_prepare_plan(const char *src, SPIPlanPtr plan,
|
||||
ParamListInfo boundParams);
|
||||
ParamListInfo boundParams);
|
||||
|
||||
static int _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
|
||||
Snapshot snapshot, Snapshot crosscheck_snapshot,
|
||||
@@ -308,7 +308,7 @@ SPI_pop(void)
|
||||
bool
|
||||
SPI_push_conditional(void)
|
||||
{
|
||||
bool pushed = (_SPI_curid != _SPI_connected);
|
||||
bool pushed = (_SPI_curid != _SPI_connected);
|
||||
|
||||
if (pushed)
|
||||
{
|
||||
@@ -962,7 +962,7 @@ SPI_cursor_open(const char *name, SPIPlanPtr plan,
|
||||
/*
|
||||
* SPI_cursor_open_with_args()
|
||||
*
|
||||
* Parse and plan a query and open it as a portal. Like SPI_execute_with_args,
|
||||
* Parse and plan a query and open it as a portal. Like SPI_execute_with_args,
|
||||
* we can tell the planner to rely on the parameter values as constants,
|
||||
* because the plan will only be used once.
|
||||
*/
|
||||
@@ -1212,8 +1212,8 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
|
||||
}
|
||||
|
||||
/*
|
||||
* Set up the snapshot to use. (PortalStart will do PushActiveSnapshot, so
|
||||
* we skip that here.)
|
||||
* Set up the snapshot to use. (PortalStart will do PushActiveSnapshot,
|
||||
* so we skip that here.)
|
||||
*/
|
||||
if (read_only)
|
||||
snapshot = GetActiveSnapshot();
|
||||
@@ -1767,13 +1767,13 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
|
||||
if (read_only && !CommandIsReadOnly(stmt))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
/* translator: %s is a SQL statement name */
|
||||
errmsg("%s is not allowed in a non-volatile function",
|
||||
CreateCommandTag(stmt))));
|
||||
/* translator: %s is a SQL statement name */
|
||||
errmsg("%s is not allowed in a non-volatile function",
|
||||
CreateCommandTag(stmt))));
|
||||
|
||||
/*
|
||||
* If not read-only mode, advance the command counter before
|
||||
* each command.
|
||||
* If not read-only mode, advance the command counter before each
|
||||
* command.
|
||||
*/
|
||||
if (!read_only)
|
||||
CommandCounterIncrement();
|
||||
@@ -1784,7 +1784,8 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
|
||||
{
|
||||
/*
|
||||
* Default read_only behavior is to use the entry-time
|
||||
* ActiveSnapshot, if any; if read-write, grab a full new snap.
|
||||
* ActiveSnapshot, if any; if read-write, grab a full new
|
||||
* snap.
|
||||
*/
|
||||
if (read_only)
|
||||
{
|
||||
@@ -1804,8 +1805,8 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
|
||||
{
|
||||
/*
|
||||
* We interpret read_only with a specified snapshot to be
|
||||
* exactly that snapshot, but read-write means use the
|
||||
* snap with advancing of command ID.
|
||||
* exactly that snapshot, but read-write means use the snap
|
||||
* with advancing of command ID.
|
||||
*/
|
||||
if (read_only)
|
||||
PushActiveSnapshot(snapshot);
|
||||
@@ -1839,7 +1840,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
|
||||
ProcessUtility(stmt,
|
||||
plansource->query_string,
|
||||
paramLI,
|
||||
false, /* not top level */
|
||||
false, /* not top level */
|
||||
dest,
|
||||
NULL);
|
||||
/* Update "processed" if stmt returned tuples */
|
||||
@@ -1853,9 +1854,9 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
|
||||
PopActiveSnapshot();
|
||||
|
||||
/*
|
||||
* The last canSetTag query sets the status values returned to
|
||||
* the caller. Be careful to free any tuptables not returned,
|
||||
* to avoid intratransaction memory leak.
|
||||
* The last canSetTag query sets the status values returned to the
|
||||
* caller. Be careful to free any tuptables not returned, to
|
||||
* avoid intratransaction memory leak.
|
||||
*/
|
||||
if (canSetTag)
|
||||
{
|
||||
@@ -1884,9 +1885,9 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
|
||||
cplan = NULL;
|
||||
|
||||
/*
|
||||
* If not read-only mode, advance the command counter after the
|
||||
* last command. This ensures that its effects are visible, in
|
||||
* case it was DDL that would affect the next CachedPlanSource.
|
||||
* If not read-only mode, advance the command counter after the last
|
||||
* command. This ensures that its effects are visible, in case it was
|
||||
* DDL that would affect the next CachedPlanSource.
|
||||
*/
|
||||
if (!read_only)
|
||||
CommandCounterIncrement();
|
||||
@@ -1912,9 +1913,9 @@ fail:
|
||||
_SPI_current->tuptable = NULL;
|
||||
|
||||
/*
|
||||
* If none of the queries had canSetTag, return SPI_OK_REWRITTEN. Prior
|
||||
* to 8.4, we used return the last query's result code, but not its
|
||||
* auxiliary results, but that's confusing.
|
||||
* If none of the queries had canSetTag, return SPI_OK_REWRITTEN. Prior to
|
||||
* 8.4, we used return the last query's result code, but not its auxiliary
|
||||
* results, but that's confusing.
|
||||
*/
|
||||
if (my_res == 0)
|
||||
my_res = SPI_OK_REWRITTEN;
|
||||
@@ -1938,7 +1939,7 @@ _SPI_convert_params(int nargs, Oid *argtypes,
|
||||
|
||||
/* sizeof(ParamListInfoData) includes the first array element */
|
||||
paramLI = (ParamListInfo) palloc(sizeof(ParamListInfoData) +
|
||||
(nargs - 1) *sizeof(ParamExternData));
|
||||
(nargs - 1) *sizeof(ParamExternData));
|
||||
paramLI->numParams = nargs;
|
||||
|
||||
for (i = 0; i < nargs; i++)
|
||||
|
@@ -5,7 +5,7 @@
|
||||
* a Tuplestore.
|
||||
*
|
||||
* Optionally, we can force detoasting (but not decompression) of out-of-line
|
||||
* toasted values. This is to support cursors WITH HOLD, which must retain
|
||||
* toasted values. This is to support cursors WITH HOLD, which must retain
|
||||
* data even if the underlying table is dropped.
|
||||
*
|
||||
*
|
||||
@@ -13,7 +13,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/executor/tstoreReceiver.c,v 1.22 2009/01/01 17:23:42 momjian Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/executor/tstoreReceiver.c,v 1.23 2009/06/11 14:48:57 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -121,8 +121,8 @@ tstoreReceiveSlot_detoast(TupleTableSlot *slot, DestReceiver *self)
|
||||
|
||||
/*
|
||||
* Fetch back any out-of-line datums. We build the new datums array in
|
||||
* myState->outvalues[] (but we can re-use the slot's isnull array).
|
||||
* Also, remember the fetched values to free afterwards.
|
||||
* myState->outvalues[] (but we can re-use the slot's isnull array). Also,
|
||||
* remember the fetched values to free afterwards.
|
||||
*/
|
||||
nfree = 0;
|
||||
for (i = 0; i < natts; i++)
|
||||
@@ -136,7 +136,7 @@ tstoreReceiveSlot_detoast(TupleTableSlot *slot, DestReceiver *self)
|
||||
if (VARATT_IS_EXTERNAL(DatumGetPointer(val)))
|
||||
{
|
||||
val = PointerGetDatum(heap_tuple_fetch_attr((struct varlena *)
|
||||
DatumGetPointer(val)));
|
||||
DatumGetPointer(val)));
|
||||
myState->tofree[nfree++] = val;
|
||||
}
|
||||
}
|
||||
|
Reference in New Issue
Block a user