
pgindent run for 8.3.

Bruce Momjian
2007-11-15 21:14:46 +00:00
parent 3adc760fb9
commit fdf5a5efb7
486 changed files with 10044 additions and 9664 deletions

src/backend/commands/analyze.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.110 2007/10/24 20:55:36 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.111 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -118,7 +118,7 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt,
totaldeadrows;
HeapTuple *rows;
PGRUsage ru0;
TimestampTz starttime = 0;
TimestampTz starttime = 0;
if (vacstmt->verbose)
elevel = INFO;
@@ -1346,7 +1346,7 @@ typedef struct
FmgrInfo *cmpFn;
int cmpFlags;
int *tupnoLink;
} CompareScalarsContext;
} CompareScalarsContext;
static void compute_minimal_stats(VacAttrStatsP stats,

src/backend/commands/cluster.c

@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.164 2007/09/29 18:05:20 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.165 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -80,7 +80,7 @@ static List *get_tables_to_cluster(MemoryContext cluster_context);
*
* The single-relation case does not have any such overhead.
*
* We also allow a relation to be specified without index. In that case,
* We also allow a relation to be specified without index. In that case,
* the indisclustered bit will be looked up, and an ERROR will be thrown
* if there is no index with the bit set.
*---------------------------------------------------------------------------
@@ -107,13 +107,13 @@ cluster(ClusterStmt *stmt, bool isTopLevel)
RelationGetRelationName(rel));
/*
* Reject clustering a remote temp table ... their local buffer manager
* is not going to cope.
* Reject clustering a remote temp table ... their local buffer
* manager is not going to cope.
*/
if (isOtherTempNamespace(RelationGetNamespace(rel)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot cluster temporary tables of other sessions")));
errmsg("cannot cluster temporary tables of other sessions")));
if (stmt->indexname == NULL)
{
@@ -289,7 +289,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
* check in the "recheck" case is appropriate (which currently means
* somebody is executing a database-wide CLUSTER), because there is
* another check in cluster() which will stop any attempt to cluster
* remote temp tables by name. There is another check in
* remote temp tables by name. There is another check in
* check_index_is_clusterable which is redundant, but we leave it for
* extra safety.
*/
@@ -733,8 +733,8 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
/*
* compute xids used to freeze and weed out dead tuples. We use -1
* freeze_min_age to avoid having CLUSTER freeze tuples earlier than
* a plain VACUUM would.
* freeze_min_age to avoid having CLUSTER freeze tuples earlier than a
* plain VACUUM would.
*/
vacuum_set_xid_limits(-1, OldHeap->rd_rel->relisshared,
&OldestXmin, &FreezeXid);
@@ -745,8 +745,8 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
/*
* Scan through the OldHeap in OldIndex order and copy each tuple into the
* NewHeap. To ensure we see recently-dead tuples that still need to be
* copied, we scan with SnapshotAny and use HeapTupleSatisfiesVacuum
* for the visibility test.
* copied, we scan with SnapshotAny and use HeapTupleSatisfiesVacuum for
* the visibility test.
*/
scan = index_beginscan(OldHeap, OldIndex,
SnapshotAny, 0, (ScanKey) NULL);
@@ -774,31 +774,33 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
isdead = false;
break;
case HEAPTUPLE_INSERT_IN_PROGRESS:
/*
* We should not see this unless it's been inserted earlier
* in our own transaction.
* We should not see this unless it's been inserted earlier in
* our own transaction.
*/
if (!TransactionIdIsCurrentTransactionId(
HeapTupleHeaderGetXmin(tuple->t_data)))
HeapTupleHeaderGetXmin(tuple->t_data)))
elog(ERROR, "concurrent insert in progress");
/* treat as live */
isdead = false;
break;
case HEAPTUPLE_DELETE_IN_PROGRESS:
/*
* We should not see this unless it's been deleted earlier
* in our own transaction.
* We should not see this unless it's been deleted earlier in
* our own transaction.
*/
Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI));
if (!TransactionIdIsCurrentTransactionId(
HeapTupleHeaderGetXmax(tuple->t_data)))
HeapTupleHeaderGetXmax(tuple->t_data)))
elog(ERROR, "concurrent delete in progress");
/* treat as recently dead */
isdead = false;
break;
default:
elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
isdead = false; /* keep compiler quiet */
isdead = false; /* keep compiler quiet */
break;
}

src/backend/commands/comment.c

@@ -7,7 +7,7 @@
* Copyright (c) 1996-2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.98 2007/11/11 19:22:48 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.99 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1493,7 +1493,7 @@ CommentTSParser(List *qualname, char *comment)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to comment on text search parser")));
errmsg("must be superuser to comment on text search parser")));
CreateComments(prsId, TSParserRelationId, 0, comment);
}
@@ -1522,7 +1522,7 @@ CommentTSTemplate(List *qualname, char *comment)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to comment on text search template")));
errmsg("must be superuser to comment on text search template")));
CreateComments(tmplId, TSTemplateRelationId, 0, comment);
}

src/backend/commands/copy.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.287 2007/09/12 20:49:27 adunstan Exp $
* $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.288 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -997,7 +997,7 @@ DoCopy(const CopyStmt *stmt, const char *queryString)
errmsg("COPY (SELECT) WITH OIDS is not supported")));
/*
* Run parse analysis and rewrite. Note this also acquires sufficient
* Run parse analysis and rewrite. Note this also acquires sufficient
* locks on the source table(s).
*
* Because the parser and planner tend to scribble on their input, we
@@ -1638,8 +1638,8 @@ CopyFrom(CopyState cstate)
MemoryContext oldcontext = CurrentMemoryContext;
ErrorContextCallback errcontext;
CommandId mycid = GetCurrentCommandId();
bool use_wal = true; /* by default, use WAL logging */
bool use_fsm = true; /* by default, use FSM for free space */
bool use_wal = true; /* by default, use WAL logging */
bool use_fsm = true; /* by default, use FSM for free space */
Assert(cstate->rel);
@@ -2148,7 +2148,7 @@ CopyFrom(CopyState cstate)
cstate->filename)));
}
/*
/*
* If we skipped writing WAL, then we need to sync the heap (but not
* indexes since those use WAL anyway)
*/
@@ -2685,7 +2685,7 @@ CopyReadAttributesText(CopyState cstate, int maxfields, char **fieldvals)
char *start_ptr;
char *end_ptr;
int input_len;
bool saw_high_bit = false;
bool saw_high_bit = false;
/* Make sure space remains in fieldvals[] */
if (fieldno >= maxfields)
@@ -2776,7 +2776,7 @@ CopyReadAttributesText(CopyState cstate, int maxfields, char **fieldvals)
}
c = val & 0xff;
if (IS_HIGHBIT_SET(c))
saw_high_bit = true;
saw_high_bit = true;
}
}
break;
@@ -2804,7 +2804,7 @@ CopyReadAttributesText(CopyState cstate, int maxfields, char **fieldvals)
* literally
*/
}
}
}
/* Add c to output string */
*output_ptr++ = c;
@@ -2813,13 +2813,15 @@ CopyReadAttributesText(CopyState cstate, int maxfields, char **fieldvals)
/* Terminate attribute value in output area */
*output_ptr++ = '\0';
/* If we de-escaped a char with the high bit set, make sure
* we still have valid data for the db encoding. Avoid calling strlen
* here for the sake of efficiency.
/*
* If we de-escaped a char with the high bit set, make sure we still
* have valid data for the db encoding. Avoid calling strlen here for
* the sake of efficiency.
*/
if (saw_high_bit)
{
char *fld = fieldvals[fieldno];
char *fld = fieldvals[fieldno];
pg_verifymbstr(fld, output_ptr - (fld + 1), false);
}
@@ -3077,15 +3079,15 @@ CopyAttributeOutText(CopyState cstate, char *string)
* We have to grovel through the string searching for control characters
* and instances of the delimiter character. In most cases, though, these
* are infrequent. To avoid overhead from calling CopySendData once per
* character, we dump out all characters between escaped characters in
* a single call. The loop invariant is that the data from "start" to
* "ptr" can be sent literally, but hasn't yet been.
* character, we dump out all characters between escaped characters in a
* single call. The loop invariant is that the data from "start" to "ptr"
* can be sent literally, but hasn't yet been.
*
* We can skip pg_encoding_mblen() overhead when encoding is safe, because
* in valid backend encodings, extra bytes of a multibyte character never
* look like ASCII. This loop is sufficiently performance-critical that
* it's worth making two copies of it to get the IS_HIGHBIT_SET() test
* out of the normal safe-encoding path.
* it's worth making two copies of it to get the IS_HIGHBIT_SET() test out
* of the normal safe-encoding path.
*/
if (cstate->encoding_embeds_ascii)
{
@@ -3096,13 +3098,16 @@ CopyAttributeOutText(CopyState cstate, char *string)
{
DUMPSOFAR();
CopySendChar(cstate, '\\');
start = ptr++; /* we include char in next run */
start = ptr++; /* we include char in next run */
}
else if ((unsigned char) c < (unsigned char) 0x20)
{
switch (c)
{
/* \r and \n must be escaped, the others are traditional */
/*
* \r and \n must be escaped, the others are
* traditional
*/
case '\b':
case '\f':
case '\n':
@@ -3134,13 +3139,16 @@ CopyAttributeOutText(CopyState cstate, char *string)
{
DUMPSOFAR();
CopySendChar(cstate, '\\');
start = ptr++; /* we include char in next run */
start = ptr++; /* we include char in next run */
}
else if ((unsigned char) c < (unsigned char) 0x20)
{
switch (c)
{
/* \r and \n must be escaped, the others are traditional */
/*
* \r and \n must be escaped, the others are
* traditional
*/
case '\b':
case '\f':
case '\n':

src/backend/commands/dbcommands.c

@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.202 2007/10/16 11:30:16 mha Exp $
* $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.203 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -260,17 +260,17 @@ createdb(const CreatedbStmt *stmt)
* Check whether encoding matches server locale settings. We allow
* mismatch in three cases:
*
* 1. ctype_encoding = SQL_ASCII, which means either that the locale
* is C/POSIX which works with any encoding, or that we couldn't determine
* 1. ctype_encoding = SQL_ASCII, which means either that the locale is
* C/POSIX which works with any encoding, or that we couldn't determine
* the locale's encoding and have to trust the user to get it right.
*
* 2. selected encoding is SQL_ASCII, but only if you're a superuser.
* This is risky but we have historically allowed it --- notably, the
* 2. selected encoding is SQL_ASCII, but only if you're a superuser. This
* is risky but we have historically allowed it --- notably, the
* regression tests require it.
*
* 3. selected encoding is UTF8 and platform is win32. This is because
* UTF8 is a pseudo codepage that is supported in all locales since
* it's converted to UTF16 before being used.
* UTF8 is a pseudo codepage that is supported in all locales since it's
* converted to UTF16 before being used.
*
* Note: if you change this policy, fix initdb to match.
*/
@@ -286,8 +286,8 @@ createdb(const CreatedbStmt *stmt)
(errmsg("encoding %s does not match server's locale %s",
pg_encoding_to_char(encoding),
setlocale(LC_CTYPE, NULL)),
errdetail("The server's LC_CTYPE setting requires encoding %s.",
pg_encoding_to_char(ctype_encoding))));
errdetail("The server's LC_CTYPE setting requires encoding %s.",
pg_encoding_to_char(ctype_encoding))));
/* Resolve default tablespace for new database */
if (dtablespacename && dtablespacename->arg)
@@ -313,7 +313,7 @@ createdb(const CreatedbStmt *stmt)
if (dst_deftablespace == GLOBALTABLESPACE_OID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("pg_global cannot be used as default tablespace")));
errmsg("pg_global cannot be used as default tablespace")));
/*
* If we are trying to change the default tablespace of the template,
@@ -375,12 +375,12 @@ createdb(const CreatedbStmt *stmt)
if (CheckOtherDBBackends(src_dboid))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
errmsg("source database \"%s\" is being accessed by other users",
dbtemplate)));
errmsg("source database \"%s\" is being accessed by other users",
dbtemplate)));
/*
* Select an OID for the new database, checking that it doesn't have
* a filename conflict with anything already existing in the tablespace
* Select an OID for the new database, checking that it doesn't have a
* filename conflict with anything already existing in the tablespace
* directories.
*/
pg_database_rel = heap_open(DatabaseRelationId, RowExclusiveLock);
@@ -558,9 +558,9 @@ createdb(const CreatedbStmt *stmt)
/*
* Set flag to update flat database file at commit. Note: this also
* forces synchronous commit, which minimizes the window between
* creation of the database files and commital of the transaction.
* If we crash before committing, we'll have a DB that's taking up
* disk space but is not in pg_database, which is not good.
* creation of the database files and commital of the transaction. If
* we crash before committing, we'll have a DB that's taking up disk
* space but is not in pg_database, which is not good.
*/
database_file_update_needed();
}
@@ -721,10 +721,10 @@ dropdb(const char *dbname, bool missing_ok)
/*
* Set flag to update flat database file at commit. Note: this also
* forces synchronous commit, which minimizes the window between
* removal of the database files and commital of the transaction.
* If we crash before committing, we'll have a DB that's gone on disk
* but still there according to pg_database, which is not good.
* forces synchronous commit, which minimizes the window between removal
* of the database files and commital of the transaction. If we crash
* before committing, we'll have a DB that's gone on disk but still there
* according to pg_database, which is not good.
*/
database_file_update_needed();
}

src/backend/commands/discard.c

@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/discard.c,v 1.1 2007/04/26 16:13:10 neilc Exp $
* $PostgreSQL: pgsql/src/backend/commands/discard.c,v 1.2 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -28,7 +28,7 @@ static void DiscardAll(bool isTopLevel);
* DISCARD { ALL | TEMP | PLANS }
*/
void
DiscardCommand(DiscardStmt *stmt, bool isTopLevel)
DiscardCommand(DiscardStmt * stmt, bool isTopLevel)
{
switch (stmt->target)
{
@@ -54,10 +54,10 @@ DiscardAll(bool isTopLevel)
{
/*
* Disallow DISCARD ALL in a transaction block. This is arguably
* inconsistent (we don't make a similar check in the command
* sequence that DISCARD ALL is equivalent to), but the idea is
* to catch mistakes: DISCARD ALL inside a transaction block
* would leave the transaction still uncommitted.
* inconsistent (we don't make a similar check in the command sequence
* that DISCARD ALL is equivalent to), but the idea is to catch mistakes:
* DISCARD ALL inside a transaction block would leave the transaction
* still uncommitted.
*/
PreventTransactionChain(isTopLevel, "DISCARD ALL");

src/backend/commands/explain.c

@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.165 2007/08/15 21:39:50 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.166 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -35,6 +35,7 @@
/* Hook for plugins to get control in ExplainOneQuery() */
ExplainOneQuery_hook_type ExplainOneQuery_hook = NULL;
/* Hook for plugins to get control in explain_get_index_name() */
explain_get_index_name_hook_type explain_get_index_name_hook = NULL;
@@ -50,10 +51,10 @@ typedef struct ExplainState
} ExplainState;
static void ExplainOneQuery(Query *query, ExplainStmt *stmt,
const char *queryString,
ParamListInfo params, TupOutputState *tstate);
const char *queryString,
ParamListInfo params, TupOutputState *tstate);
static void report_triggers(ResultRelInfo *rInfo, bool show_relname,
StringInfo buf);
StringInfo buf);
static double elapsed_time(instr_time *starttime);
static void explain_outNode(StringInfo str,
Plan *plan, PlanState *planstate,
@@ -90,14 +91,14 @@ ExplainQuery(ExplainStmt *stmt, const char *queryString,
getParamListTypes(params, &param_types, &num_params);
/*
* Run parse analysis and rewrite. Note this also acquires sufficient
* Run parse analysis and rewrite. Note this also acquires sufficient
* locks on the source table(s).
*
* Because the parser and planner tend to scribble on their input, we
* make a preliminary copy of the source querytree. This prevents
* problems in the case that the EXPLAIN is in a portal or plpgsql
* function and is executed repeatedly. (See also the same hack in
* DECLARE CURSOR and PREPARE.) XXX FIXME someday.
* Because the parser and planner tend to scribble on their input, we make
* a preliminary copy of the source querytree. This prevents problems in
* the case that the EXPLAIN is in a portal or plpgsql function and is
* executed repeatedly. (See also the same hack in DECLARE CURSOR and
* PREPARE.) XXX FIXME someday.
*/
rewritten = pg_analyze_and_rewrite((Node *) copyObject(stmt->query),
queryString, param_types, num_params);
@@ -215,7 +216,7 @@ ExplainOneUtility(Node *utilityStmt, ExplainStmt *stmt,
* to call it.
*/
void
ExplainOnePlan(PlannedStmt *plannedstmt, ParamListInfo params,
ExplainOnePlan(PlannedStmt * plannedstmt, ParamListInfo params,
ExplainStmt *stmt, TupOutputState *tstate)
{
QueryDesc *queryDesc;
@@ -376,8 +377,8 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, StringInfo buf)
InstrEndLoop(instr);
/*
* We ignore triggers that were never invoked; they likely
* aren't relevant to the current query type.
* We ignore triggers that were never invoked; they likely aren't
* relevant to the current query type.
*/
if (instr->ntuples == 0)
continue;
@@ -624,7 +625,7 @@ explain_outNode(StringInfo str,
if (ScanDirectionIsBackward(((IndexScan *) plan)->indexorderdir))
appendStringInfoString(str, " Backward");
appendStringInfo(str, " using %s",
explain_get_index_name(((IndexScan *) plan)->indexid));
explain_get_index_name(((IndexScan *) plan)->indexid));
/* FALL THRU */
case T_SeqScan:
case T_BitmapHeapScan:
@@ -1137,7 +1138,7 @@ show_sort_keys(Plan *sortplan, int nkeys, AttrNumber *keycols,
/* Set up deparsing context */
context = deparse_context_for_plan((Node *) outerPlan(sortplan),
NULL, /* Sort has no innerPlan */
NULL, /* Sort has no innerPlan */
es->rtable);
useprefix = list_length(es->rtable) > 1;
@@ -1192,7 +1193,7 @@ show_sort_info(SortState *sortstate,
static const char *
explain_get_index_name(Oid indexId)
{
const char *result;
const char *result;
if (explain_get_index_name_hook)
result = (*explain_get_index_name_hook) (indexId);

src/backend/commands/functioncmds.c

@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.86 2007/11/11 19:22:48 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.87 2007/11/15 21:14:33 momjian Exp $
*
* DESCRIPTION
* These routines take the parse tree and pick out the
@@ -56,7 +56,7 @@
static void AlterFunctionOwner_internal(Relation rel, HeapTuple tup,
Oid newOwnerId);
Oid newOwnerId);
/*
@@ -121,8 +121,8 @@ compute_return_type(TypeName *returnType, Oid languageOid,
if (returnType->typmods != NIL)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("type modifier cannot be specified for shell type \"%s\"",
typnam)));
errmsg("type modifier cannot be specified for shell type \"%s\"",
typnam)));
/* Otherwise, go ahead and make a shell type */
ereport(NOTICE,
@@ -285,7 +285,7 @@ examine_parameter_list(List *parameters, Oid languageOid,
* FUNCTION and ALTER FUNCTION and return it via one of the out
* parameters. Returns true if the passed option was recognized. If
* the out parameter we were going to assign to points to non-NULL,
* raise a duplicate-clause error. (We don't try to detect duplicate
* raise a duplicate-clause error. (We don't try to detect duplicate
* SET parameters though --- if you're redundant, the last one wins.)
*/
static bool
@@ -390,7 +390,7 @@ update_proconfig_value(ArrayType *a, List *set_items)
if (valuestr)
a = GUCArrayAdd(a, sstmt->name, valuestr);
else /* RESET */
else /* RESET */
a = GUCArrayDelete(a, sstmt->name);
}
}
@@ -1598,9 +1598,9 @@ DropCast(DropCastStmt *stmt)
TypeNameToString(stmt->targettype))));
else
ereport(NOTICE,
(errmsg("cast from type %s to type %s does not exist, skipping",
TypeNameToString(stmt->sourcetype),
TypeNameToString(stmt->targettype))));
(errmsg("cast from type %s to type %s does not exist, skipping",
TypeNameToString(stmt->sourcetype),
TypeNameToString(stmt->targettype))));
return;
}

src/backend/commands/indexcmds.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.166 2007/09/20 17:56:31 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.167 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -396,10 +396,9 @@ DefineIndex(RangeVar *heapRelation,
}
/*
* Parse AM-specific options, convert to text array form,
* validate. The src_options introduced due to using indexes
* via the "CREATE LIKE INCLUDING INDEXES" statement also need to
* be merged here
* Parse AM-specific options, convert to text array form, validate. The
* src_options introduced due to using indexes via the "CREATE LIKE
* INCLUDING INDEXES" statement also need to be merged here
*/
if (src_options)
reloptions = unflatten_reloptions(src_options);
@@ -452,7 +451,7 @@ DefineIndex(RangeVar *heapRelation,
{
indexRelationId =
index_create(relationId, indexRelationName, indexRelationId,
indexInfo, accessMethodId, tablespaceId, classObjectId,
indexInfo, accessMethodId, tablespaceId, classObjectId,
coloptions, reloptions, primary, isconstraint,
allowSystemTableMods, skip_build, concurrent);
@@ -461,18 +460,18 @@ DefineIndex(RangeVar *heapRelation,
/*
* For a concurrent build, we next insert the catalog entry and add
* constraints. We don't build the index just yet; we must first make
* the catalog entry so that the new index is visible to updating
* constraints. We don't build the index just yet; we must first make the
* catalog entry so that the new index is visible to updating
* transactions. That will prevent them from making incompatible HOT
* updates. The new index will be marked not indisready and not
* indisvalid, so that no one else tries to either insert into it or use
* it for queries. We pass skip_build = true to prevent the build.
* it for queries. We pass skip_build = true to prevent the build.
*/
indexRelationId =
index_create(relationId, indexRelationName, indexRelationId,
indexInfo, accessMethodId, tablespaceId, classObjectId,
coloptions, reloptions, primary, isconstraint,
allowSystemTableMods, true, concurrent);
allowSystemTableMods, true, concurrent);
/*
* We must commit our current transaction so that the index becomes
@@ -506,15 +505,15 @@ DefineIndex(RangeVar *heapRelation,
* xacts that open the table for writing after this point; they will see
* the new index when they open it.
*
* Note: the reason we use actual lock acquisition here, rather than
* just checking the ProcArray and sleeping, is that deadlock is possible
* if one of the transactions in question is blocked trying to acquire
* an exclusive lock on our table. The lock code will detect deadlock
* and error out properly.
* Note: the reason we use actual lock acquisition here, rather than just
* checking the ProcArray and sleeping, is that deadlock is possible if
* one of the transactions in question is blocked trying to acquire an
* exclusive lock on our table. The lock code will detect deadlock and
* error out properly.
*
* Note: GetLockConflicts() never reports our own xid, hence we need not
* check for that. Also, prepared xacts are not reported, which is
* fine since they certainly aren't going to do anything more.
* check for that. Also, prepared xacts are not reported, which is fine
* since they certainly aren't going to do anything more.
*/
old_lockholders = GetLockConflicts(&heaplocktag, ShareLock);
@@ -530,15 +529,15 @@ DefineIndex(RangeVar *heapRelation,
* indexes. We have waited out all the existing transactions and any new
* transaction will have the new index in its list, but the index is still
* marked as "not-ready-for-inserts". The index is consulted while
* deciding HOT-safety though. This arrangement ensures that no new HOT
* deciding HOT-safety though. This arrangement ensures that no new HOT
* chains can be created where the new tuple and the old tuple in the
* chain have different index keys.
*
* We now take a new snapshot, and build the index using all tuples that
* are visible in this snapshot. We can be sure that any HOT updates
* to these tuples will be compatible with the index, since any updates
* made by transactions that didn't know about the index are now committed
* or rolled back. Thus, each visible tuple is either the end of its
* are visible in this snapshot. We can be sure that any HOT updates to
* these tuples will be compatible with the index, since any updates made
* by transactions that didn't know about the index are now committed or
* rolled back. Thus, each visible tuple is either the end of its
* HOT-chain or the extension of the chain is HOT-safe for this index.
*/
@@ -565,10 +564,9 @@ DefineIndex(RangeVar *heapRelation,
index_close(indexRelation, NoLock);
/*
* Update the pg_index row to mark the index as ready for inserts.
* Once we commit this transaction, any new transactions that
* open the table must insert new entries into the index for insertions
* and non-HOT updates.
* Update the pg_index row to mark the index as ready for inserts. Once we
* commit this transaction, any new transactions that open the table must
* insert new entries into the index for insertions and non-HOT updates.
*/
pg_index = heap_open(IndexRelationId, RowExclusiveLock);
@@ -611,8 +609,8 @@ DefineIndex(RangeVar *heapRelation,
/*
* Now take the "reference snapshot" that will be used by validate_index()
* to filter candidate tuples. Beware! There might still be snapshots
* in use that treat some transaction as in-progress that our reference
* to filter candidate tuples. Beware! There might still be snapshots in
* use that treat some transaction as in-progress that our reference
* snapshot treats as committed. If such a recently-committed transaction
* deleted tuples in the table, we will not include them in the index; yet
* those transactions which see the deleting one as still-in-progress will
@@ -636,15 +634,15 @@ DefineIndex(RangeVar *heapRelation,
* The index is now valid in the sense that it contains all currently
* interesting tuples. But since it might not contain tuples deleted just
* before the reference snap was taken, we have to wait out any
* transactions that might have older snapshots. Obtain a list of
* VXIDs of such transactions, and wait for them individually.
* transactions that might have older snapshots. Obtain a list of VXIDs
* of such transactions, and wait for them individually.
*
* We can exclude any running transactions that have xmin >= the xmax of
* our reference snapshot, since they are clearly not interested in any
* missing older tuples. Transactions in other DBs aren't a problem
* either, since they'll never even be able to see this index.
* Also, GetCurrentVirtualXIDs never reports our own vxid, so we
* need not check for that.
* either, since they'll never even be able to see this index. Also,
* GetCurrentVirtualXIDs never reports our own vxid, so we need not check
* for that.
*/
old_snapshots = GetCurrentVirtualXIDs(ActiveSnapshot->xmax, false);
@@ -681,8 +679,8 @@ DefineIndex(RangeVar *heapRelation,
* relcache entries for the index itself, but we should also send a
* relcache inval on the parent table to force replanning of cached plans.
* Otherwise existing sessions might fail to use the new index where it
* would be useful. (Note that our earlier commits did not create
* reasons to replan; relcache flush on the index itself was sufficient.)
* would be useful. (Note that our earlier commits did not create reasons
* to replan; relcache flush on the index itself was sufficient.)
*/
CacheInvalidateRelcacheByRelid(heaprelid.relId);
@@ -837,9 +835,9 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
accessMethodId);
/*
* Set up the per-column options (indoption field). For now, this
* is zero for any un-ordered index, while ordered indexes have DESC
* and NULLS FIRST/LAST options.
* Set up the per-column options (indoption field). For now, this is
* zero for any un-ordered index, while ordered indexes have DESC and
* NULLS FIRST/LAST options.
*/
colOptionP[attn] = 0;
if (amcanorder)

src/backend/commands/opclasscmds.c

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.55 2007/11/11 19:22:48 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.56 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -52,33 +52,33 @@ typedef struct
Oid lefttype; /* lefttype */
Oid righttype; /* righttype */
bool recheck; /* oper recheck flag (unused for proc) */
} OpFamilyMember;
} OpFamilyMember;
static void AlterOpFamilyAdd(List *opfamilyname, Oid amoid, Oid opfamilyoid,
int maxOpNumber, int maxProcNumber,
List *items);
static void AlterOpFamilyDrop(List *opfamilyname, Oid amoid, Oid opfamilyoid,
int maxOpNumber, int maxProcNumber,
List *items);
int maxOpNumber, int maxProcNumber,
List *items);
static void processTypesSpec(List *args, Oid *lefttype, Oid *righttype);
static void assignOperTypes(OpFamilyMember *member, Oid amoid, Oid typeoid);
static void assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid);
static void addFamilyMember(List **list, OpFamilyMember *member, bool isProc);
static void assignOperTypes(OpFamilyMember * member, Oid amoid, Oid typeoid);
static void assignProcTypes(OpFamilyMember * member, Oid amoid, Oid typeoid);
static void addFamilyMember(List **list, OpFamilyMember * member, bool isProc);
static void storeOperators(List *opfamilyname, Oid amoid,
Oid opfamilyoid, Oid opclassoid,
List *operators, bool isAdd);
Oid opfamilyoid, Oid opclassoid,
List *operators, bool isAdd);
static void storeProcedures(List *opfamilyname, Oid amoid,
Oid opfamilyoid, Oid opclassoid,
List *procedures, bool isAdd);
Oid opfamilyoid, Oid opclassoid,
List *procedures, bool isAdd);
static void dropOperators(List *opfamilyname, Oid amoid, Oid opfamilyoid,
List *operators);
List *operators);
static void dropProcedures(List *opfamilyname, Oid amoid, Oid opfamilyoid,
List *procedures);
List *procedures);
static void AlterOpClassOwner_internal(Relation rel, HeapTuple tuple,
Oid newOwnerId);
static void AlterOpFamilyOwner_internal(Relation rel, HeapTuple tuple,
Oid newOwnerId);
Oid newOwnerId);
/*
@@ -111,7 +111,7 @@ OpFamilyCacheLookup(Oid amID, List *opfamilyname)
else
{
/* Unqualified opfamily name, so search the search path */
Oid opfID = OpfamilynameGetOpfid(amID, opfname);
Oid opfID = OpfamilynameGetOpfid(amID, opfname);
if (!OidIsValid(opfID))
return NULL;
@@ -151,7 +151,7 @@ OpClassCacheLookup(Oid amID, List *opclassname)
else
{
/* Unqualified opclass name, so search the search path */
Oid opcID = OpclassnameGetOpcid(amID, opcname);
Oid opcID = OpclassnameGetOpcid(amID, opcname);
if (!OidIsValid(opcID))
return NULL;
@@ -348,8 +348,9 @@ DefineOpClass(CreateOpClassStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator family \"%s\" does not exist for access method \"%s\"",
NameListToString(stmt->opfamilyname), stmt->amname)));
NameListToString(stmt->opfamilyname), stmt->amname)));
opfamilyoid = HeapTupleGetOid(tup);
/*
* XXX given the superuser check above, there's no need for an
* ownership check here
@@ -367,6 +368,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
if (HeapTupleIsValid(tup))
{
opfamilyoid = HeapTupleGetOid(tup);
/*
* XXX given the superuser check above, there's no need for an
* ownership check here
@@ -597,7 +599,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
opclassoid, procedures, false);
/*
* Create dependencies for the opclass proper. Note: we do not create a
* Create dependencies for the opclass proper. Note: we do not create a
* dependency link to the AM, because we don't currently support DROP
* ACCESS METHOD.
*/
@@ -644,7 +646,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
* Define a new index operator family.
*/
void
DefineOpFamily(CreateOpFamilyStmt *stmt)
DefineOpFamily(CreateOpFamilyStmt * stmt)
{
char *opfname; /* name of opfamily we're creating */
Oid amoid, /* our AM's oid */
@@ -686,8 +688,8 @@ DefineOpFamily(CreateOpFamilyStmt *stmt)
ReleaseSysCache(tup);
/*
* Currently, we require superuser privileges to create an opfamily.
* See comments in DefineOpClass.
* Currently, we require superuser privileges to create an opfamily. See
* comments in DefineOpClass.
*
* XXX re-enable NOT_USED code sections below if you remove this test.
*/
@@ -763,7 +765,7 @@ DefineOpFamily(CreateOpFamilyStmt *stmt)
* different code paths.
*/
void
AlterOpFamily(AlterOpFamilyStmt *stmt)
AlterOpFamily(AlterOpFamilyStmt * stmt)
{
Oid amoid, /* our AM's oid */
opfamilyoid; /* oid of opfamily */
@@ -876,7 +878,7 @@ AlterOpFamilyAdd(List *opfamilyname, Oid amoid, Oid opfamilyoid,
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("operator argument types must be specified in ALTER OPERATOR FAMILY")));
operOid = InvalidOid; /* keep compiler quiet */
operOid = InvalidOid; /* keep compiler quiet */
}
#ifdef NOT_USED
@@ -932,7 +934,7 @@ AlterOpFamilyAdd(List *opfamilyname, Oid amoid, Oid opfamilyoid,
case OPCLASS_ITEM_STORAGETYPE:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("STORAGE cannot be specified in ALTER OPERATOR FAMILY")));
errmsg("STORAGE cannot be specified in ALTER OPERATOR FAMILY")));
break;
default:
elog(ERROR, "unrecognized item type: %d", item->itemtype);
@@ -1057,7 +1059,7 @@ processTypesSpec(List *args, Oid *lefttype, Oid *righttype)
* and do any validity checking we can manage.
*/
static void
assignOperTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
assignOperTypes(OpFamilyMember * member, Oid amoid, Oid typeoid)
{
Operator optup;
Form_pg_operator opform;
@@ -1098,7 +1100,7 @@ assignOperTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
* and do any validity checking we can manage.
*/
static void
assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
assignProcTypes(OpFamilyMember * member, Oid amoid, Oid typeoid)
{
HeapTuple proctup;
Form_pg_proc procform;
@@ -1156,10 +1158,10 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
else
{
/*
* The default for GiST and GIN in CREATE OPERATOR CLASS is to use
* the class' opcintype as lefttype and righttype. In CREATE or
* ALTER OPERATOR FAMILY, opcintype isn't available, so make the
* user specify the types.
* The default for GiST and GIN in CREATE OPERATOR CLASS is to use the
* class' opcintype as lefttype and righttype. In CREATE or ALTER
* OPERATOR FAMILY, opcintype isn't available, so make the user
* specify the types.
*/
if (!OidIsValid(member->lefttype))
member->lefttype = typeoid;
@@ -1179,7 +1181,7 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
* duplicated strategy or proc number.
*/
static void
addFamilyMember(List **list, OpFamilyMember *member, bool isProc)
addFamilyMember(List **list, OpFamilyMember * member, bool isProc)
{
ListCell *l;
@@ -1560,7 +1562,7 @@ RemoveOpClass(RemoveOpClassStmt *stmt)
* Deletes an opfamily.
*/
void
RemoveOpFamily(RemoveOpFamilyStmt *stmt)
RemoveOpFamily(RemoveOpFamilyStmt * stmt)
{
Oid amID,
opfID;
@@ -1589,11 +1591,11 @@ RemoveOpFamily(RemoveOpFamilyStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator family \"%s\" does not exist for access method \"%s\"",
NameListToString(stmt->opfamilyname), stmt->amname)));
NameListToString(stmt->opfamilyname), stmt->amname)));
else
ereport(NOTICE,
(errmsg("operator family \"%s\" does not exist for access method \"%s\"",
NameListToString(stmt->opfamilyname), stmt->amname)));
NameListToString(stmt->opfamilyname), stmt->amname)));
return;
}
@@ -2120,7 +2122,7 @@ AlterOpFamilyOwner(List *name, const char *access_method, Oid newOwnerId)
}
/*
* The first parameter is pg_opfamily, opened and suitably locked. The second
* The first parameter is pg_opfamily, opened and suitably locked. The second
* parameter is a copy of the tuple from pg_opfamily we want to modify.
*/
static void

src/backend/commands/operatorcmds.c

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.37 2007/11/11 19:22:48 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.38 2007/11/15 21:14:33 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -65,7 +65,7 @@ DefineOperator(List *names, List *parameters)
Oid oprNamespace;
AclResult aclresult;
bool canMerge = false; /* operator merges */
bool canHash = false; /* operator hashes */
bool canHash = false; /* operator hashes */
List *functionName = NIL; /* function for operator */
TypeName *typeName1 = NULL; /* first type name */
TypeName *typeName2 = NULL; /* second type name */

src/backend/commands/portalcmds.c

@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.66 2007/10/24 23:27:08 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.67 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -39,7 +39,7 @@
* utilityStmt field is set.
*/
void
PerformCursorOpen(PlannedStmt *stmt, ParamListInfo params,
PerformCursorOpen(PlannedStmt * stmt, ParamListInfo params,
const char *queryString, bool isTopLevel)
{
DeclareCursorStmt *cstmt = (DeclareCursorStmt *) stmt->utilityStmt;
@@ -102,7 +102,7 @@ PerformCursorOpen(PlannedStmt *stmt, ParamListInfo params,
*
* If the user didn't specify a SCROLL type, allow or disallow scrolling
* based on whether it would require any additional runtime overhead to do
* so. Also, we disallow scrolling for FOR UPDATE cursors.
* so. Also, we disallow scrolling for FOR UPDATE cursors.
*/
portal->cursorOptions = cstmt->options;
if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
@@ -369,8 +369,8 @@ PersistHoldablePortal(Portal portal)
* to be at, but the tuplestore API doesn't support that. So we start
* at the beginning of the tuplestore and iterate through it until we
* reach where we need to be. FIXME someday? (Fortunately, the
* typical case is that we're supposed to be at or near the start
* of the result set, so this isn't as bad as it sounds.)
* typical case is that we're supposed to be at or near the start of
* the result set, so this isn't as bad as it sounds.)
*/
MemoryContextSwitchTo(portal->holdContext);
@@ -378,7 +378,7 @@ PersistHoldablePortal(Portal portal)
{
/* we can handle this case even if posOverflow */
while (tuplestore_advance(portal->holdStore, true))
/* continue */ ;
/* continue */ ;
}
else
{

src/backend/commands/prepare.c

@@ -10,7 +10,7 @@
* Copyright (c) 2002-2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.78 2007/11/11 19:22:48 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.79 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,7 +44,7 @@ static HTAB *prepared_queries = NULL;
static void InitQueryHashTable(void);
static ParamListInfo EvaluateParams(PreparedStatement *pstmt, List *params,
const char *queryString, EState *estate);
const char *queryString, EState *estate);
static Datum build_regtype_array(Oid *param_types, int num_params);
/*
@@ -101,8 +101,8 @@ PrepareQuery(PrepareStmt *stmt, const char *queryString)
* passed in from above us will not be visible to it), allowing
* information about unknown parameters to be deduced from context.
*
* Because parse analysis scribbles on the raw querytree, we must make
* a copy to ensure we have a pristine raw tree to cache. FIXME someday.
* Because parse analysis scribbles on the raw querytree, we must make a
* copy to ensure we have a pristine raw tree to cache. FIXME someday.
*/
query = parse_analyze_varparams((Node *) copyObject(stmt->query),
queryString,
@@ -155,7 +155,7 @@ PrepareQuery(PrepareStmt *stmt, const char *queryString)
CreateCommandTag((Node *) query),
argtypes,
nargs,
0, /* default cursor options */
0, /* default cursor options */
plan_list,
true);
}
@@ -299,8 +299,8 @@ EvaluateParams(PreparedStatement *pstmt, List *params,
if (nparams != num_params)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("wrong number of parameters for prepared statement \"%s\"",
pstmt->stmt_name),
errmsg("wrong number of parameters for prepared statement \"%s\"",
pstmt->stmt_name),
errdetail("Expected %d parameters but got %d.",
num_params, nparams)));
@@ -309,8 +309,8 @@ EvaluateParams(PreparedStatement *pstmt, List *params,
return NULL;
/*
* We have to run parse analysis for the expressions. Since the
* parser is not cool about scribbling on its input, copy first.
* We have to run parse analysis for the expressions. Since the parser is
* not cool about scribbling on its input, copy first.
*/
params = (List *) copyObject(params);
@@ -334,7 +334,7 @@ EvaluateParams(PreparedStatement *pstmt, List *params,
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
errmsg("cannot use aggregate function in EXECUTE parameter")));
errmsg("cannot use aggregate function in EXECUTE parameter")));
given_type_id = exprType(expr);
@@ -350,7 +350,7 @@ EvaluateParams(PreparedStatement *pstmt, List *params,
i + 1,
format_type_be(given_type_id),
format_type_be(expected_type_id)),
errhint("You will need to rewrite or cast the expression.")));
errhint("You will need to rewrite or cast the expression.")));
lfirst(l) = expr;
i++;
@@ -734,8 +734,8 @@ pg_prepared_statement(PG_FUNCTION_ARGS)
oldcontext = MemoryContextSwitchTo(per_query_ctx);
/*
* build tupdesc for result tuples. This must match the definition of
* the pg_prepared_statements view in system_views.sql
* build tupdesc for result tuples. This must match the definition of the
* pg_prepared_statements view in system_views.sql
*/
tupdesc = CreateTemplateTupleDesc(5, false);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
@@ -780,11 +780,11 @@ pg_prepared_statement(PG_FUNCTION_ARGS)
nulls[1] = true;
else
values[1] = DirectFunctionCall1(textin,
CStringGetDatum(prep_stmt->plansource->query_string));
CStringGetDatum(prep_stmt->plansource->query_string));
values[2] = TimestampTzGetDatum(prep_stmt->prepare_time);
values[3] = build_regtype_array(prep_stmt->plansource->param_types,
prep_stmt->plansource->num_params);
prep_stmt->plansource->num_params);
values[4] = BoolGetDatum(prep_stmt->from_sql);
tuple = heap_form_tuple(tupdesc, values, nulls);

src/backend/commands/schemacmds.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.46 2007/06/23 22:12:50 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.47 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -111,17 +111,17 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
/*
* Examine the list of commands embedded in the CREATE SCHEMA command, and
* reorganize them into a sequentially executable order with no forward
* references. Note that the result is still a list of raw parsetrees
* --- we cannot, in general, run parse analysis on one statement until
* we have actually executed the prior ones.
* references. Note that the result is still a list of raw parsetrees ---
* we cannot, in general, run parse analysis on one statement until we
* have actually executed the prior ones.
*/
parsetree_list = transformCreateSchemaStmt(stmt);
/*
* Execute each command contained in the CREATE SCHEMA. Since the
* grammar allows only utility commands in CREATE SCHEMA, there is
* no need to pass them through parse_analyze() or the rewriter;
* we can just hand them straight to ProcessUtility.
* Execute each command contained in the CREATE SCHEMA. Since the grammar
* allows only utility commands in CREATE SCHEMA, there is no need to pass
* them through parse_analyze() or the rewriter; we can just hand them
* straight to ProcessUtility.
*/
foreach(parsetree_item, parsetree_list)
{
@@ -131,7 +131,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
ProcessUtility(stmt,
queryString,
NULL,
false, /* not top level */
false, /* not top level */
None_Receiver,
NULL);
/* make sure later steps can see the object created here */

src/backend/commands/sequence.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.147 2007/10/25 18:54:03 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.148 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1145,8 +1145,8 @@ init_params(List *options, bool isInit,
snprintf(bufm, sizeof(bufm), INT64_FORMAT, new->max_value);
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("START value (%s) cannot be greater than MAXVALUE (%s)",
bufs, bufm)));
errmsg("START value (%s) cannot be greater than MAXVALUE (%s)",
bufs, bufm)));
}
/* CACHE */
@@ -1221,7 +1221,7 @@ process_owned_by(Relation seqrel, List *owned_by)
if (seqrel->rd_rel->relowner != tablerel->rd_rel->relowner)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("sequence must have same owner as table it is linked to")));
errmsg("sequence must have same owner as table it is linked to")));
if (RelationGetNamespace(seqrel) != RelationGetNamespace(tablerel))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),

src/backend/commands/tablecmds.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.235 2007/11/11 19:22:48 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.236 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -169,7 +169,7 @@ static List *MergeAttributes(List *schema, List *supers, bool istemp,
static void MergeConstraintsIntoExisting(Relation child_rel, Relation parent_rel);
static void MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel);
static void add_nonduplicate_constraint(Constraint *cdef,
ConstrCheck *check, int *ncheck);
ConstrCheck *check, int *ncheck);
static bool change_varattnos_walker(Node *node, const AttrNumber *newattno);
static void StoreCatalogInheritance(Oid relationId, List *supers);
static void StoreCatalogInheritance1(Oid relationId, Oid parentOid,
@@ -256,7 +256,7 @@ static void ATExecSetRelOptions(Relation rel, List *defList, bool isReset);
static void ATExecEnableDisableTrigger(Relation rel, char *trigname,
char fires_when, bool skip_system);
static void ATExecEnableDisableRule(Relation rel, char *rulename,
char fires_when);
char fires_when);
static void ATExecAddInherit(Relation rel, RangeVar *parent);
static void ATExecDropInherit(Relation rel, RangeVar *parent);
static void copy_relation_data(Relation rel, SMgrRelation dst);
@@ -395,6 +395,7 @@ DefineRelation(CreateStmt *stmt, char relkind)
if (cdef->contype == CONSTR_CHECK)
add_nonduplicate_constraint(cdef, check, &ncheck);
}
/*
* parse_utilcmd.c might have passed some precooked constraints too,
* due to LIKE tab INCLUDING CONSTRAINTS
@@ -841,8 +842,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
if (list_member_oid(parentOids, RelationGetRelid(relation)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_TABLE),
errmsg("relation \"%s\" would be inherited from more than once",
parent->relname)));
errmsg("relation \"%s\" would be inherited from more than once",
parent->relname)));
parentOids = lappend_oid(parentOids, RelationGetRelid(relation));
@@ -888,8 +889,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
exist_attno = findAttrByName(attributeName, inhSchema);
if (exist_attno > 0)
{
Oid defTypeId;
int32 deftypmod;
Oid defTypeId;
int32 deftypmod;
/*
* Yes, try to merge the two column definitions. They must
@@ -1032,8 +1033,10 @@ MergeAttributes(List *schema, List *supers, bool istemp,
if (exist_attno > 0)
{
ColumnDef *def;
Oid defTypeId, newTypeId;
int32 deftypmod, newtypmod;
Oid defTypeId,
newTypeId;
int32 deftypmod,
newtypmod;
/*
* Yes, try to merge the two column definitions. They must
@@ -1632,8 +1635,8 @@ renamerel(Oid myrelid, const char *newrelname, ObjectType reltype)
bool relhastriggers;
/*
* Grab an exclusive lock on the target table, index, sequence or
* view, which we will NOT release until end of transaction.
* Grab an exclusive lock on the target table, index, sequence or view,
* which we will NOT release until end of transaction.
*/
targetrelation = relation_open(myrelid, AccessExclusiveLock);
@@ -1647,9 +1650,8 @@ renamerel(Oid myrelid, const char *newrelname, ObjectType reltype)
RelationGetRelationName(targetrelation))));
/*
* For compatibility with prior releases, we don't complain if
* ALTER TABLE or ALTER INDEX is used to rename a sequence or
* view.
* For compatibility with prior releases, we don't complain if ALTER TABLE
* or ALTER INDEX is used to rename a sequence or view.
*/
relkind = targetrelation->rd_rel->relkind;
if (reltype == OBJECT_SEQUENCE && relkind != 'S')
@@ -1746,19 +1748,19 @@ renamerel(Oid myrelid, const char *newrelname, ObjectType reltype)
void
AlterTable(AlterTableStmt *stmt)
{
Relation rel = relation_openrv(stmt->relation, AccessExclusiveLock);
Relation rel = relation_openrv(stmt->relation, AccessExclusiveLock);
int expected_refcnt;
/*
* Disallow ALTER TABLE when the current backend has any open reference
* to it besides the one we just got (such as an open cursor or active
* plan); our AccessExclusiveLock doesn't protect us against stomping on
* our own foot, only other people's feet!
* Disallow ALTER TABLE when the current backend has any open reference to
* it besides the one we just got (such as an open cursor or active plan);
* our AccessExclusiveLock doesn't protect us against stomping on our own
* foot, only other people's feet!
*
* Note: the only case known to cause serious trouble is ALTER COLUMN TYPE,
* and some changes are obviously pretty benign, so this could possibly
* be relaxed to only error out for certain types of alterations. But
* the use-case for allowing any of these things is not obvious, so we
* Note: the only case known to cause serious trouble is ALTER COLUMN
* TYPE, and some changes are obviously pretty benign, so this could
* possibly be relaxed to only error out for certain types of alterations.
* But the use-case for allowing any of these things is not obvious, so we
* won't work hard at it for now.
*/
expected_refcnt = rel->rd_isnailed ? 2 : 1;
@@ -1784,7 +1786,7 @@ AlterTable(AlterTableStmt *stmt)
void
AlterTableInternal(Oid relid, List *cmds, bool recurse)
{
Relation rel = relation_open(relid, AccessExclusiveLock);
Relation rel = relation_open(relid, AccessExclusiveLock);
ATController(rel, cmds, recurse);
}
@@ -2153,54 +2155,54 @@ ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd)
ATExecSetRelOptions(rel, (List *) cmd->def, true);
break;
case AT_EnableTrig: /* ENABLE TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
TRIGGER_FIRES_ON_ORIGIN, false);
case AT_EnableTrig: /* ENABLE TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
TRIGGER_FIRES_ON_ORIGIN, false);
break;
case AT_EnableAlwaysTrig: /* ENABLE ALWAYS TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
TRIGGER_FIRES_ALWAYS, false);
case AT_EnableAlwaysTrig: /* ENABLE ALWAYS TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
TRIGGER_FIRES_ALWAYS, false);
break;
case AT_EnableReplicaTrig: /* ENABLE REPLICA TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
TRIGGER_FIRES_ON_REPLICA, false);
case AT_EnableReplicaTrig: /* ENABLE REPLICA TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
TRIGGER_FIRES_ON_REPLICA, false);
break;
case AT_DisableTrig: /* DISABLE TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
TRIGGER_DISABLED, false);
ATExecEnableDisableTrigger(rel, cmd->name,
TRIGGER_DISABLED, false);
break;
case AT_EnableTrigAll: /* ENABLE TRIGGER ALL */
ATExecEnableDisableTrigger(rel, NULL,
TRIGGER_FIRES_ON_ORIGIN, false);
ATExecEnableDisableTrigger(rel, NULL,
TRIGGER_FIRES_ON_ORIGIN, false);
break;
case AT_DisableTrigAll: /* DISABLE TRIGGER ALL */
ATExecEnableDisableTrigger(rel, NULL,
TRIGGER_DISABLED, false);
ATExecEnableDisableTrigger(rel, NULL,
TRIGGER_DISABLED, false);
break;
case AT_EnableTrigUser: /* ENABLE TRIGGER USER */
ATExecEnableDisableTrigger(rel, NULL,
TRIGGER_FIRES_ON_ORIGIN, true);
ATExecEnableDisableTrigger(rel, NULL,
TRIGGER_FIRES_ON_ORIGIN, true);
break;
case AT_DisableTrigUser: /* DISABLE TRIGGER USER */
ATExecEnableDisableTrigger(rel, NULL,
TRIGGER_DISABLED, true);
ATExecEnableDisableTrigger(rel, NULL,
TRIGGER_DISABLED, true);
break;
case AT_EnableRule: /* ENABLE RULE name */
ATExecEnableDisableRule(rel, cmd->name,
RULE_FIRES_ON_ORIGIN);
case AT_EnableRule: /* ENABLE RULE name */
ATExecEnableDisableRule(rel, cmd->name,
RULE_FIRES_ON_ORIGIN);
break;
case AT_EnableAlwaysRule: /* ENABLE ALWAYS RULE name */
ATExecEnableDisableRule(rel, cmd->name,
RULE_FIRES_ALWAYS);
case AT_EnableAlwaysRule: /* ENABLE ALWAYS RULE name */
ATExecEnableDisableRule(rel, cmd->name,
RULE_FIRES_ALWAYS);
break;
case AT_EnableReplicaRule: /* ENABLE REPLICA RULE name */
ATExecEnableDisableRule(rel, cmd->name,
RULE_FIRES_ON_REPLICA);
case AT_EnableReplicaRule: /* ENABLE REPLICA RULE name */
ATExecEnableDisableRule(rel, cmd->name,
RULE_FIRES_ON_REPLICA);
break;
case AT_DisableRule: /* DISABLE RULE name */
ATExecEnableDisableRule(rel, cmd->name,
RULE_DISABLED);
ATExecEnableDisableRule(rel, cmd->name,
RULE_DISABLED);
break;
case AT_AddInherit:
@@ -2303,8 +2305,8 @@ ATRewriteTables(List **wqueue)
/*
* Swap the physical files of the old and new heaps. Since we are
* generating a new heap, we can use RecentXmin for the table's new
* relfrozenxid because we rewrote all the tuples on
* generating a new heap, we can use RecentXmin for the table's
* new relfrozenxid because we rewrote all the tuples on
* ATRewriteTable, so no older Xid remains on the table.
*/
swap_relation_files(tab->relid, OIDNewHeap, RecentXmin);
@@ -3011,8 +3013,8 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel,
if (HeapTupleIsValid(tuple))
{
Form_pg_attribute childatt = (Form_pg_attribute) GETSTRUCT(tuple);
Oid ctypeId;
int32 ctypmod;
Oid ctypeId;
int32 ctypmod;
/* Okay if child matches by type */
ctypeId = typenameTypeId(NULL, colDef->typename, &ctypmod);
@@ -3819,8 +3821,8 @@ ATExecAddConstraint(AlteredTableInfo *tab, Relation rel, Node *newConstraint)
/*
* Currently, we only expect to see CONSTR_CHECK nodes
* arriving here (see the preprocessing done in
* parse_utilcmd.c). Use a switch anyway to make it easier
* to add more code later.
* parse_utilcmd.c). Use a switch anyway to make it easier to
* add more code later.
*/
switch (constr->contype)
{
@@ -4030,7 +4032,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
*
* Note that we have to be careful about the difference between the actual
* PK column type and the opclass' declared input type, which might be
* only binary-compatible with it. The declared opcintype is the right
* only binary-compatible with it. The declared opcintype is the right
* thing to probe pg_amop with.
*/
if (numfks != numpks)
@@ -4067,10 +4069,10 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
/*
* Check it's a btree; currently this can never fail since no other
* index AMs support unique indexes. If we ever did have other
* types of unique indexes, we'd need a way to determine which
* operator strategy number is equality. (Is it reasonable to
* insist that every such index AM use btree's number for equality?)
* index AMs support unique indexes. If we ever did have other types
* of unique indexes, we'd need a way to determine which operator
* strategy number is equality. (Is it reasonable to insist that
* every such index AM use btree's number for equality?)
*/
if (amid != BTREE_AM_OID)
elog(ERROR, "only b-tree indexes are supported for foreign keys");
@ -4088,8 +4090,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
eqstrategy, opcintype, opcintype, opfamily);
/*
* Are there equality operators that take exactly the FK type?
* Assume we should look through any domain here.
* Are there equality operators that take exactly the FK type? Assume
* we should look through any domain here.
*/
fktyped = getBaseType(fktype);
@ -4099,21 +4101,21 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
ffeqop = get_opfamily_member(opfamily, fktyped, fktyped,
eqstrategy);
else
ffeqop = InvalidOid; /* keep compiler quiet */
ffeqop = InvalidOid; /* keep compiler quiet */
if (!(OidIsValid(pfeqop) && OidIsValid(ffeqop)))
{
/*
* Otherwise, look for an implicit cast from the FK type to
* the opcintype, and if found, use the primary equality operator.
* Otherwise, look for an implicit cast from the FK type to the
* opcintype, and if found, use the primary equality operator.
* This is a bit tricky because opcintype might be a generic type
* such as ANYARRAY, and so what we have to test is whether the
* two actual column types can be concurrently cast to that type.
* (Otherwise, we'd fail to reject combinations such as int[] and
* point[].)
*/
Oid input_typeids[2];
Oid target_typeids[2];
Oid input_typeids[2];
Oid target_typeids[2];
input_typeids[0] = pktype;
input_typeids[1] = fktype;
@ -5255,10 +5257,10 @@ ATPostAlterTypeParse(char *cmd, List **wqueue)
ListCell *list_item;
/*
* We expect that we will get only ALTER TABLE and CREATE INDEX statements.
* Hence, there is no need to pass them through parse_analyze() or the
* rewriter, but instead we need to pass them through parse_utilcmd.c
* to make them ready for execution.
* We expect that we will get only ALTER TABLE and CREATE INDEX
* statements. Hence, there is no need to pass them through
* parse_analyze() or the rewriter, but instead we need to pass them
* through parse_utilcmd.c to make them ready for execution.
*/
raw_parsetree_list = raw_parser(cmd);
querytree_list = NIL;
@ -5272,8 +5274,8 @@ ATPostAlterTypeParse(char *cmd, List **wqueue)
cmd));
else if (IsA(stmt, AlterTableStmt))
querytree_list = list_concat(querytree_list,
transformAlterTableStmt((AlterTableStmt *) stmt,
cmd));
transformAlterTableStmt((AlterTableStmt *) stmt,
cmd));
else
querytree_list = lappend(querytree_list, stmt);
}
@ -5528,7 +5530,7 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing)
*/
if (tuple_class->relkind != RELKIND_INDEX)
AlterTypeOwnerInternal(tuple_class->reltype, newOwnerId,
tuple_class->relkind == RELKIND_COMPOSITE_TYPE);
tuple_class->relkind == RELKIND_COMPOSITE_TYPE);
/*
* If we are operating on a table, also change the ownership of any
@ -5983,7 +5985,7 @@ ATExecEnableDisableTrigger(Relation rel, char *trigname,
*/
static void
ATExecEnableDisableRule(Relation rel, char *trigname,
char fires_when)
char fires_when)
{
EnableDisableRule(rel, trigname, fires_when);
}
@ -6051,8 +6053,8 @@ ATExecAddInherit(Relation child_rel, RangeVar *parent)
if (inh->inhparent == RelationGetRelid(parent_rel))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_TABLE),
errmsg("relation \"%s\" would be inherited from more than once",
RelationGetRelationName(parent_rel))));
errmsg("relation \"%s\" would be inherited from more than once",
RelationGetRelationName(parent_rel))));
if (inh->inhseqno > inhseqno)
inhseqno = inh->inhseqno;
}
@ -6063,12 +6065,12 @@ ATExecAddInherit(Relation child_rel, RangeVar *parent)
* (In particular, this disallows making a rel inherit from itself.)
*
* This is not completely bulletproof because of race conditions: in
* multi-level inheritance trees, someone else could concurrently
* be making another inheritance link that closes the loop but does
* not join either of the rels we have locked. Preventing that seems
* to require exclusive locks on the entire inheritance tree, which is
* a cure worse than the disease. find_all_inheritors() will cope with
* circularity anyway, so don't sweat it too much.
* multi-level inheritance trees, someone else could concurrently be
* making another inheritance link that closes the loop but does not join
* either of the rels we have locked. Preventing that seems to require
* exclusive locks on the entire inheritance tree, which is a cure worse
* than the disease. find_all_inheritors() will cope with circularity
* anyway, so don't sweat it too much.
*/
children = find_all_inheritors(RelationGetRelid(child_rel));
@ -6095,7 +6097,7 @@ ATExecAddInherit(Relation child_rel, RangeVar *parent)
MergeConstraintsIntoExisting(child_rel, parent_rel);
/*
* OK, it looks valid. Make the catalog entries that show inheritance.
* OK, it looks valid. Make the catalog entries that show inheritance.
*/
StoreCatalogInheritance1(RelationGetRelid(child_rel),
RelationGetRelid(parent_rel),
@ -6189,8 +6191,8 @@ MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel)
if (attribute->attnotnull && !childatt->attnotnull)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("column \"%s\" in child table must be marked NOT NULL",
attributeName)));
errmsg("column \"%s\" in child table must be marked NOT NULL",
attributeName)));
/*
* OK, bump the child column's inheritance count. (If we fail
@ -6345,20 +6347,20 @@ ATExecDropInherit(Relation rel, RangeVar *parent)
bool found = false;
/*
* AccessShareLock on the parent is probably enough, seeing that DROP TABLE
* doesn't lock parent tables at all. We need some lock since we'll be
* inspecting the parent's schema.
* AccessShareLock on the parent is probably enough, seeing that DROP
* TABLE doesn't lock parent tables at all. We need some lock since we'll
* be inspecting the parent's schema.
*/
parent_rel = heap_openrv(parent, AccessShareLock);
/*
* We don't bother to check ownership of the parent table --- ownership
* of the child is presumed enough rights.
* We don't bother to check ownership of the parent table --- ownership of
* the child is presumed enough rights.
*/
/*
* Find and destroy the pg_inherits entry linking the two, or error out
* if there is none.
* Find and destroy the pg_inherits entry linking the two, or error out if
* there is none.
*/
catalogRelation = heap_open(InheritsRelationId, RowExclusiveLock);
ScanKeyInit(&key[0],
@ -6508,9 +6510,9 @@ AlterTableNamespace(RangeVar *relation, const char *newschema)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot move an owned sequence into another schema"),
errdetail("Sequence \"%s\" is linked to table \"%s\".",
RelationGetRelationName(rel),
get_rel_name(tableId))));
errdetail("Sequence \"%s\" is linked to table \"%s\".",
RelationGetRelationName(rel),
get_rel_name(tableId))));
}
break;
case RELKIND_COMPOSITE_TYPE:


@ -37,7 +37,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.50 2007/11/15 20:36:40 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.51 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -223,7 +223,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
if (strchr(location, '\''))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
errmsg("tablespace location cannot contain single quotes")));
errmsg("tablespace location cannot contain single quotes")));
/*
* Allowing relative paths seems risky
@ -356,10 +356,10 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
}
/*
* Force synchronous commit, to minimize the window between creating
* the symlink on-disk and marking the transaction committed. It's
* not great that there is any window at all, but definitely we don't
* want to make it larger than necessary.
* Force synchronous commit, to minimize the window between creating the
* symlink on-disk and marking the transaction committed. It's not great
* that there is any window at all, but definitely we don't want to make
* it larger than necessary.
*/
ForceSyncCommit();
@ -461,7 +461,7 @@ DropTableSpace(DropTableSpaceStmt *stmt)
LWLockAcquire(TablespaceCreateLock, LW_EXCLUSIVE);
/*
* Try to remove the physical infrastructure.
* Try to remove the physical infrastructure.
*/
if (!remove_tablespace_directories(tablespaceoid, false))
{
@ -469,7 +469,7 @@ DropTableSpace(DropTableSpaceStmt *stmt)
* Not all files deleted? However, there can be lingering empty files
* in the directories, left behind by for example DROP TABLE, that
* have been scheduled for deletion at next checkpoint (see comments
* in mdunlink() for details). We could just delete them immediately,
* in mdunlink() for details). We could just delete them immediately,
* but we can't tell them apart from important data files that we
* mustn't delete. So instead, we force a checkpoint which will clean
* out any lingering files, and try again.
@ -506,10 +506,10 @@ DropTableSpace(DropTableSpaceStmt *stmt)
*/
/*
* Force synchronous commit, to minimize the window between removing
* the files on-disk and marking the transaction committed. It's
* not great that there is any window at all, but definitely we don't
* want to make it larger than necessary.
* Force synchronous commit, to minimize the window between removing the
* files on-disk and marking the transaction committed. It's not great
* that there is any window at all, but definitely we don't want to make
* it larger than necessary.
*/
ForceSyncCommit();
@ -561,7 +561,7 @@ remove_tablespace_directories(Oid tablespaceoid, bool redo)
*
* If redo is true then ENOENT is a likely outcome here, and we allow it
* to pass without comment. In normal operation we still allow it, but
* with a warning. This is because even though ProcessUtility disallows
* with a warning. This is because even though ProcessUtility disallows
* DROP TABLESPACE in a transaction block, it's possible that a previous
* DROP failed and rolled back after removing the tablespace directories
* and symlink. We want to allow a new DROP attempt to succeed at
@ -1019,12 +1019,12 @@ assign_temp_tablespaces(const char *newval, bool doit, GucSource source)
* transaction, we'll leak a bit of TopTransactionContext memory.
* Doesn't seem worth worrying about.
*/
Oid *tblSpcs;
int numSpcs;
Oid *tblSpcs;
int numSpcs;
ListCell *l;
tblSpcs = (Oid *) MemoryContextAlloc(TopTransactionContext,
list_length(namelist) * sizeof(Oid));
list_length(namelist) * sizeof(Oid));
numSpcs = 0;
foreach(l, namelist)
{
@ -1112,10 +1112,10 @@ PrepareTempTablespaces(void)
return;
/*
* Can't do catalog access unless within a transaction. This is just
* a safety check in case this function is called by low-level code that
* could conceivably execute outside a transaction. Note that in such
* a scenario, fd.c will fall back to using the current database's default
* Can't do catalog access unless within a transaction. This is just a
* safety check in case this function is called by low-level code that
* could conceivably execute outside a transaction. Note that in such a
* scenario, fd.c will fall back to using the current database's default
* tablespace, which should always be OK.
*/
if (!IsTransactionState())
@ -1136,7 +1136,7 @@ PrepareTempTablespaces(void)
/* Store tablespace OIDs in an array in TopTransactionContext */
tblSpcs = (Oid *) MemoryContextAlloc(TopTransactionContext,
list_length(namelist) * sizeof(Oid));
list_length(namelist) * sizeof(Oid));
numSpcs = 0;
foreach(l, namelist)
{
@ -1160,8 +1160,8 @@ PrepareTempTablespaces(void)
}
/*
* Allow explicit specification of database's default tablespace
* in temp_tablespaces without triggering permissions checks.
* Allow explicit specification of database's default tablespace in
* temp_tablespaces without triggering permissions checks.
*/
if (curoid == MyDatabaseTableSpace)
{
@ -1241,8 +1241,8 @@ get_tablespace_name(Oid spc_oid)
/*
* Search pg_tablespace. We use a heapscan here even though there is an
* index on oid, on the theory that pg_tablespace will usually have just
* a few entries and so an indexed lookup is a waste of effort.
* index on oid, on the theory that pg_tablespace will usually have just a
* few entries and so an indexed lookup is a waste of effort.
*/
rel = heap_open(TableSpaceRelationId, AccessShareLock);


@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/tsearchcmds.c,v 1.5 2007/08/22 22:30:20 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/tsearchcmds.c,v 1.6 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -46,10 +46,10 @@
#include "utils/syscache.h"
static void MakeConfigurationMapping(AlterTSConfigurationStmt *stmt,
HeapTuple tup, Relation relMap);
static void DropConfigurationMapping(AlterTSConfigurationStmt *stmt,
HeapTuple tup, Relation relMap);
static void MakeConfigurationMapping(AlterTSConfigurationStmt * stmt,
HeapTuple tup, Relation relMap);
static void DropConfigurationMapping(AlterTSConfigurationStmt * stmt,
HeapTuple tup, Relation relMap);
/* --------------------- TS Parser commands ------------------------ */
@ -220,8 +220,8 @@ DefineTSParser(List *names, List *parameters)
else
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("text search parser parameter \"%s\" not recognized",
defel->defname)));
errmsg("text search parser parameter \"%s\" not recognized",
defel->defname)));
}
/*
@ -366,7 +366,7 @@ RenameTSParser(List *oldname, const char *newname)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("text search parser \"%s\" already exists",
newname)));
newname)));
namestrcpy(&(((Form_pg_ts_parser) GETSTRUCT(tup))->prsname), newname);
simple_heap_update(rel, &tup->t_self, tup);
@ -421,10 +421,9 @@ verify_dictoptions(Oid tmplId, List *dictoptions)
/*
* Suppress this test when running in a standalone backend. This is a
* hack to allow initdb to create prefab dictionaries that might not
* actually be usable in template1's encoding (due to using external
* files that can't be translated into template1's encoding). We want
* to create them anyway, since they might be usable later in other
* databases.
* actually be usable in template1's encoding (due to using external files
* that can't be translated into template1's encoding). We want to create
* them anyway, since they might be usable later in other databases.
*/
if (!IsUnderPostmaster)
return;
@ -445,14 +444,14 @@ verify_dictoptions(Oid tmplId, List *dictoptions)
if (dictoptions)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("text search template \"%s\" does not accept options",
NameStr(tform->tmplname))));
errmsg("text search template \"%s\" does not accept options",
NameStr(tform->tmplname))));
}
else
{
/*
* Copy the options just in case init method thinks it can scribble
* on them ...
* Copy the options just in case init method thinks it can scribble on
* them ...
*/
dictoptions = copyObject(dictoptions);
@ -793,8 +792,8 @@ AlterTSDictionary(AlterTSDictionaryStmt * stmt)
/*
* NOTE: because we only support altering the options, not the template,
* there is no need to update dependencies. This might have to change
* if the options ever reference inside-the-database objects.
* there is no need to update dependencies. This might have to change if
* the options ever reference inside-the-database objects.
*/
heap_freetuple(newtup);
@ -966,7 +965,7 @@ DefineTSTemplate(List *names, List *parameters)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to create text search templates")));
errmsg("must be superuser to create text search templates")));
/* Convert list of names to a name and namespace */
namespaceoid = QualifiedNameGetCreationNamespace(names, &tmplname);
@ -1048,7 +1047,7 @@ RenameTSTemplate(List *oldname, const char *newname)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to rename text search templates")));
errmsg("must be superuser to rename text search templates")));
rel = heap_open(TSTemplateRelationId, RowExclusiveLock);
@ -1633,7 +1632,7 @@ AlterTSConfigurationOwner(List *name, Oid newOwnerId)
* ALTER TEXT SEARCH CONFIGURATION - main entry point
*/
void
AlterTSConfiguration(AlterTSConfigurationStmt *stmt)
AlterTSConfiguration(AlterTSConfigurationStmt * stmt)
{
HeapTuple tup;
Relation relMap;
@ -1727,7 +1726,7 @@ getTokenTypes(Oid prsId, List *tokennames)
* ALTER TEXT SEARCH CONFIGURATION ADD/ALTER MAPPING
*/
static void
MakeConfigurationMapping(AlterTSConfigurationStmt *stmt,
MakeConfigurationMapping(AlterTSConfigurationStmt * stmt,
HeapTuple tup, Relation relMap)
{
Oid cfgId = HeapTupleGetOid(tup);
@ -1889,7 +1888,7 @@ MakeConfigurationMapping(AlterTSConfigurationStmt *stmt,
* ALTER TEXT SEARCH CONFIGURATION DROP MAPPING
*/
static void
DropConfigurationMapping(AlterTSConfigurationStmt *stmt,
DropConfigurationMapping(AlterTSConfigurationStmt * stmt,
HeapTuple tup, Relation relMap)
{
Oid cfgId = HeapTupleGetOid(tup);
@ -1981,7 +1980,7 @@ serialize_deflist(List *deflist)
char *val = defGetString(defel);
appendStringInfo(&buf, "%s = ",
quote_identifier(defel->defname));
quote_identifier(defel->defname));
/* If backslashes appear, force E syntax to determine their handling */
if (strchr(val, '\\'))
appendStringInfoChar(&buf, ESCAPE_STRING_SYNTAX);
@ -2014,7 +2013,7 @@ serialize_deflist(List *deflist)
List *
deserialize_deflist(Datum txt)
{
text *in = DatumGetTextP(txt); /* in case it's toasted */
text *in = DatumGetTextP(txt); /* in case it's toasted */
List *result = NIL;
int len = VARSIZE(in) - VARHDRSZ;
char *ptr,
@ -2022,7 +2021,8 @@ deserialize_deflist(Datum txt)
*workspace,
*wsptr = NULL,
*startvalue = NULL;
typedef enum {
typedef enum
{
CS_WAITKEY,
CS_INKEY,
CS_INQKEY,
@ -2031,7 +2031,7 @@ deserialize_deflist(Datum txt)
CS_INSQVALUE,
CS_INDQVALUE,
CS_INWVALUE
} ds_state;
} ds_state;
ds_state state = CS_WAITKEY;
workspace = (char *) palloc(len + 1); /* certainly enough room */
@ -2075,7 +2075,7 @@ deserialize_deflist(Datum txt)
case CS_INQKEY:
if (*ptr == '"')
{
if (ptr+1 < endptr && ptr[1] == '"')
if (ptr + 1 < endptr && ptr[1] == '"')
{
/* copy only one of the two quotes */
*wsptr++ = *ptr++;
@ -2106,7 +2106,7 @@ deserialize_deflist(Datum txt)
startvalue = wsptr;
state = CS_INSQVALUE;
}
else if (*ptr == 'E' && ptr+1 < endptr && ptr[1] == '\'')
else if (*ptr == 'E' && ptr + 1 < endptr && ptr[1] == '\'')
{
ptr++;
startvalue = wsptr;
@ -2127,7 +2127,7 @@ deserialize_deflist(Datum txt)
case CS_INSQVALUE:
if (*ptr == '\'')
{
if (ptr+1 < endptr && ptr[1] == '\'')
if (ptr + 1 < endptr && ptr[1] == '\'')
{
/* copy only one of the two quotes */
*wsptr++ = *ptr++;
@ -2137,13 +2137,13 @@ deserialize_deflist(Datum txt)
*wsptr++ = '\0';
result = lappend(result,
makeDefElem(pstrdup(workspace),
(Node *) makeString(pstrdup(startvalue))));
(Node *) makeString(pstrdup(startvalue))));
state = CS_WAITKEY;
}
}
else if (*ptr == '\\')
{
if (ptr+1 < endptr && ptr[1] == '\\')
if (ptr + 1 < endptr && ptr[1] == '\\')
{
/* copy only one of the two backslashes */
*wsptr++ = *ptr++;
@ -2159,7 +2159,7 @@ deserialize_deflist(Datum txt)
case CS_INDQVALUE:
if (*ptr == '"')
{
if (ptr+1 < endptr && ptr[1] == '"')
if (ptr + 1 < endptr && ptr[1] == '"')
{
/* copy only one of the two quotes */
*wsptr++ = *ptr++;
@ -2169,7 +2169,7 @@ deserialize_deflist(Datum txt)
*wsptr++ = '\0';
result = lappend(result,
makeDefElem(pstrdup(workspace),
(Node *) makeString(pstrdup(startvalue))));
(Node *) makeString(pstrdup(startvalue))));
state = CS_WAITKEY;
}
}
@ -2184,7 +2184,7 @@ deserialize_deflist(Datum txt)
*wsptr++ = '\0';
result = lappend(result,
makeDefElem(pstrdup(workspace),
(Node *) makeString(pstrdup(startvalue))));
(Node *) makeString(pstrdup(startvalue))));
state = CS_WAITKEY;
}
else
@ -2203,7 +2203,7 @@ deserialize_deflist(Datum txt)
*wsptr++ = '\0';
result = lappend(result,
makeDefElem(pstrdup(workspace),
(Node *) makeString(pstrdup(startvalue))));
(Node *) makeString(pstrdup(startvalue))));
}
else if (state != CS_WAITKEY)
ereport(ERROR,


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.110 2007/11/11 19:22:48 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.111 2007/11/15 21:14:34 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@ -120,11 +120,11 @@ DefineType(List *names, List *parameters)
Oid typmodoutOid = InvalidOid;
Oid analyzeOid = InvalidOid;
char *array_type;
Oid array_oid;
Oid array_oid;
ListCell *pl;
Oid typoid;
Oid resulttype;
Relation pg_type;
Relation pg_type;
/* Convert list of names to a name and namespace */
typeNamespace = QualifiedNameGetCreationNamespace(names, &typeName);
@ -145,8 +145,8 @@ DefineType(List *names, List *parameters)
0, 0);
/*
* If it's not a shell, see if it's an autogenerated array type,
* and if so rename it out of the way.
* If it's not a shell, see if it's an autogenerated array type, and if so
* rename it out of the way.
*/
if (OidIsValid(typoid) && get_typisdefined(typoid))
{
@ -155,8 +155,8 @@ DefineType(List *names, List *parameters)
}
/*
* If it doesn't exist, create it as a shell, so that the OID is known
* for use in the I/O function definitions.
* If it doesn't exist, create it as a shell, so that the OID is known for
* use in the I/O function definitions.
*/
if (!OidIsValid(typoid))
{
@ -404,7 +404,7 @@ DefineType(List *names, List *parameters)
NameListToString(analyzeName));
/* Preassign array type OID so we can insert it in pg_type.typarray */
pg_type = heap_open(TypeRelationId, AccessShareLock);
pg_type = heap_open(TypeRelationId, AccessShareLock);
array_oid = GetNewOid(pg_type);
heap_close(pg_type, AccessShareLock);
@ -418,14 +418,14 @@ DefineType(List *names, List *parameters)
InvalidOid, /* relation oid (n/a here) */
0, /* relation kind (ditto) */
internalLength, /* internal size */
TYPTYPE_BASE, /* type-type (base type) */
TYPTYPE_BASE, /* type-type (base type) */
delimiter, /* array element delimiter */
inputOid, /* input procedure */
outputOid, /* output procedure */
receiveOid, /* receive procedure */
sendOid, /* send procedure */
typmodinOid, /* typmodin procedure */
typmodoutOid,/* typmodout procedure */
typmodoutOid, /* typmodout procedure */
analyzeOid, /* analyze procedure */
elemType, /* element type ID */
false, /* this is not an array type */
@ -517,7 +517,7 @@ RemoveType(List *names, DropBehavior behavior, bool missing_ok)
return;
}
typeoid = typeTypeId(tup);
typeoid = typeTypeId(tup);
typ = (Form_pg_type) GETSTRUCT(tup);
/* Permission check: must own type or its namespace */
@ -564,9 +564,9 @@ RemoveTypeById(Oid typeOid)
simple_heap_delete(relation, &tup->t_self);
/*
* If it is an enum, delete the pg_enum entries too; we don't bother
* with making dependency entries for those, so it has to be done
* "by hand" here.
* If it is an enum, delete the pg_enum entries too; we don't bother with
* making dependency entries for those, so it has to be done "by hand"
* here.
*/
if (((Form_pg_type) GETSTRUCT(tup))->typtype == TYPTYPE_ENUM)
EnumValuesDelete(typeOid);
@ -628,7 +628,7 @@ DefineDomain(CreateDomainStmt *stmt)
get_namespace_name(domainNamespace));
/*
* Check for collision with an existing type name. If there is one and
* Check for collision with an existing type name. If there is one and
* it's an autogenerated array, we can rename it out of the way.
*/
old_type_oid = GetSysCacheOid(TYPENAMENSP,
@ -651,10 +651,9 @@ DefineDomain(CreateDomainStmt *stmt)
basetypeoid = HeapTupleGetOid(typeTup);
/*
* Base type must be a plain base type, another domain or an enum.
* Domains over pseudotypes would create a security hole. Domains
* over composite types might be made to work in the future, but not
* today.
* Base type must be a plain base type, another domain or an enum. Domains
* over pseudotypes would create a security hole. Domains over composite
* types might be made to work in the future, but not today.
*/
typtype = baseType->typtype;
if (typtype != TYPTYPE_BASE &&
@ -751,8 +750,8 @@ DefineDomain(CreateDomainStmt *stmt)
pstate = make_parsestate(NULL);
/*
* Cook the constr->raw_expr into an expression.
* Note: name is strictly for error message
* Cook the constr->raw_expr into an expression. Note:
* name is strictly for error message
*/
defaultExpr = cookDefault(pstate, constr->raw_expr,
basetypeoid,
@ -760,8 +759,8 @@ DefineDomain(CreateDomainStmt *stmt)
domainName);
/*
* If the expression is just a NULL constant, we treat
* it like not having a default.
* If the expression is just a NULL constant, we treat it
* like not having a default.
*
* Note that if the basetype is another domain, we'll see
* a CoerceToDomain expr here and not discard the default.
@ -786,7 +785,7 @@ DefineDomain(CreateDomainStmt *stmt)
defaultValue =
deparse_expression(defaultExpr,
deparse_context_for(domainName,
InvalidOid),
InvalidOid),
false, false);
defaultValueBin = nodeToString(defaultExpr);
}
@ -872,8 +871,8 @@ DefineDomain(CreateDomainStmt *stmt)
outputProcedure, /* output procedure */
receiveProcedure, /* receive procedure */
sendProcedure, /* send procedure */
InvalidOid, /* typmodin procedure - none */
InvalidOid, /* typmodout procedure - none */
InvalidOid, /* typmodin procedure - none */
InvalidOid, /* typmodout procedure - none */
analyzeProcedure, /* analyze procedure */
typelem, /* element type ID */
false, /* this isn't an array */
@ -961,7 +960,7 @@ RemoveDomain(List *names, DropBehavior behavior, bool missing_ok)
return;
}
typeoid = typeTypeId(tup);
typeoid = typeTypeId(tup);
/* Permission check: must own type or its namespace */
if (!pg_type_ownercheck(typeoid, GetUserId()) &&
@ -996,16 +995,16 @@ RemoveDomain(List *names, DropBehavior behavior, bool missing_ok)
* Registers a new enum.
*/
void
DefineEnum(CreateEnumStmt *stmt)
DefineEnum(CreateEnumStmt * stmt)
{
char *enumName;
char *enumArrayName;
Oid enumNamespace;
Oid enumTypeOid;
char *enumName;
char *enumArrayName;
Oid enumNamespace;
Oid enumTypeOid;
AclResult aclresult;
Oid old_type_oid;
Oid enumArrayOid;
Relation pg_type;
Oid old_type_oid;
Oid enumArrayOid;
Relation pg_type;
/* Convert list of names to a name and namespace */
enumNamespace = QualifiedNameGetCreationNamespace(stmt->typename,
@ -1018,7 +1017,7 @@ DefineEnum(CreateEnumStmt *stmt)
get_namespace_name(enumNamespace));
/*
* Check for collision with an existing type name. If there is one and
* Check for collision with an existing type name. If there is one and
* it's an autogenerated array, we can rename it out of the way.
*/
old_type_oid = GetSysCacheOid(TYPENAMENSP,
@ -1034,39 +1033,39 @@ DefineEnum(CreateEnumStmt *stmt)
}
/* Preassign array type OID so we can insert it in pg_type.typarray */
pg_type = heap_open(TypeRelationId, AccessShareLock);
pg_type = heap_open(TypeRelationId, AccessShareLock);
enumArrayOid = GetNewOid(pg_type);
heap_close(pg_type, AccessShareLock);
/* Create the pg_type entry */
enumTypeOid =
TypeCreate(InvalidOid, /* no predetermined type OID */
enumName, /* type name */
enumNamespace, /* namespace */
InvalidOid, /* relation oid (n/a here) */
0, /* relation kind (ditto) */
sizeof(Oid), /* internal size */
enumTypeOid =
TypeCreate(InvalidOid, /* no predetermined type OID */
enumName, /* type name */
enumNamespace, /* namespace */
InvalidOid, /* relation oid (n/a here) */
0, /* relation kind (ditto) */
sizeof(Oid), /* internal size */
TYPTYPE_ENUM, /* type-type (enum type) */
DEFAULT_TYPDELIM, /* array element delimiter */
F_ENUM_IN, /* input procedure */
F_ENUM_OUT, /* output procedure */
F_ENUM_RECV, /* receive procedure */
F_ENUM_SEND, /* send procedure */
InvalidOid, /* typmodin procedure - none */
InvalidOid, /* typmodout procedure - none */
InvalidOid, /* analyze procedure - default */
InvalidOid, /* element type ID */
false, /* this is not an array type */
F_ENUM_IN, /* input procedure */
F_ENUM_OUT, /* output procedure */
F_ENUM_RECV, /* receive procedure */
F_ENUM_SEND, /* send procedure */
InvalidOid, /* typmodin procedure - none */
InvalidOid, /* typmodout procedure - none */
InvalidOid, /* analyze procedure - default */
InvalidOid, /* element type ID */
false, /* this is not an array type */
enumArrayOid, /* array type we are about to create */
InvalidOid, /* base type ID (only for domains) */
NULL, /* never a default type value */
NULL, /* binary default isn't sent either */
true, /* always passed by value */
'i', /* int alignment */
'p', /* TOAST strategy always plain */
-1, /* typMod (Domains only) */
0, /* Array dimensions of typbasetype */
false); /* Type NOT NULL */
InvalidOid, /* base type ID (only for domains) */
NULL, /* never a default type value */
NULL, /* binary default isn't sent either */
true, /* always passed by value */
'i', /* int alignment */
'p', /* TOAST strategy always plain */
-1, /* typMod (Domains only) */
0, /* Array dimensions of typbasetype */
false); /* Type NOT NULL */
/* Enter the enum's values into pg_enum */
EnumValuesCreate(enumTypeOid, stmt->vals);
@ -1077,31 +1076,31 @@ DefineEnum(CreateEnumStmt *stmt)
enumArrayName = makeArrayTypeName(enumName, enumNamespace);
TypeCreate(enumArrayOid, /* force assignment of this type OID */
enumArrayName, /* type name */
enumNamespace, /* namespace */
InvalidOid, /* relation oid (n/a here) */
0, /* relation kind (ditto) */
-1, /* internal size (always varlena) */
enumArrayName, /* type name */
enumNamespace, /* namespace */
InvalidOid, /* relation oid (n/a here) */
0, /* relation kind (ditto) */
-1, /* internal size (always varlena) */
TYPTYPE_BASE, /* type-type (base type) */
DEFAULT_TYPDELIM, /* array element delimiter */
F_ARRAY_IN, /* input procedure */
F_ARRAY_OUT, /* output procedure */
F_ARRAY_RECV, /* receive procedure */
F_ARRAY_SEND, /* send procedure */
DEFAULT_TYPDELIM, /* array element delimiter */
F_ARRAY_IN, /* input procedure */
F_ARRAY_OUT, /* output procedure */
F_ARRAY_RECV, /* receive procedure */
F_ARRAY_SEND, /* send procedure */
InvalidOid, /* typmodin procedure - none */
InvalidOid, /* typmodout procedure - none */
InvalidOid, /* analyze procedure - default */
enumTypeOid, /* element type ID */
InvalidOid, /* analyze procedure - default */
enumTypeOid, /* element type ID */
true, /* yes this is an array type */
InvalidOid, /* no further array type */
InvalidOid, /* base type ID */
NULL, /* never a default type value */
NULL, /* binary default isn't sent either */
false, /* never passed by value */
'i', /* enums have align i, so do their arrays */
'x', /* ARRAY is always toastable */
-1, /* typMod (Domains only) */
0, /* Array dimensions of typbasetype */
InvalidOid, /* base type ID */
NULL, /* never a default type value */
NULL, /* binary default isn't sent either */
false, /* never passed by value */
'i', /* enums have align i, so do their arrays */
'x', /* ARRAY is always toastable */
-1, /* typMod (Domains only) */
0, /* Array dimensions of typbasetype */
false); /* Type NOT NULL */
pfree(enumArrayName);
@ -1475,7 +1474,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
* DefineDomain.)
*/
if (defaultExpr == NULL ||
(IsA(defaultExpr, Const) && ((Const *) defaultExpr)->constisnull))
(IsA(defaultExpr, Const) &&((Const *) defaultExpr)->constisnull))
{
/* Default is NULL, drop it */
new_record_nulls[Anum_pg_type_typdefaultbin - 1] = 'n';
@ -1493,13 +1492,13 @@ AlterDomainDefault(List *names, Node *defaultRaw)
defaultValue = deparse_expression(defaultExpr,
deparse_context_for(NameStr(typTup->typname),
InvalidOid),
false, false);
false, false);
/*
* Form an updated tuple with the new default and write it back.
*/
new_record[Anum_pg_type_typdefaultbin - 1] = DirectFunctionCall1(textin,
CStringGetDatum(nodeToString(defaultExpr)));
CStringGetDatum(nodeToString(defaultExpr)));
new_record_repl[Anum_pg_type_typdefaultbin - 1] = 'r';
new_record[Anum_pg_type_typdefault - 1] = DirectFunctionCall1(textin,
@ -1527,7 +1526,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
/* Rebuild dependencies */
GenerateTypeDependencies(typTup->typnamespace,
domainoid,
InvalidOid, /* typrelid is n/a */
InvalidOid, /* typrelid is n/a */
0, /* relation kind is n/a */
typTup->typowner,
typTup->typinput,
@ -1956,9 +1955,10 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
if (pg_depend->classid == TypeRelationId)
{
Assert(get_typtype(pg_depend->objid) == TYPTYPE_DOMAIN);
/*
* Recursively add dependent columns to the output list. This
* is a bit inefficient since we may fail to combine RelToCheck
* Recursively add dependent columns to the output list. This is
* a bit inefficient since we may fail to combine RelToCheck
* entries when attributes of the same rel have different derived
* domain types, but it's probably not worth improving.
*/
@ -2365,7 +2365,7 @@ AlterTypeOwner(List *names, Oid newOwnerId)
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("type \"%s\" does not exist",
TypeNameToString(typename))));
typeOid = typeTypeId(tup);
typeOid = typeTypeId(tup);
/* Copy the syscache entry so we can scribble on it below */
newtup = heap_copytuple(tup);
@ -2375,8 +2375,8 @@ AlterTypeOwner(List *names, Oid newOwnerId)
/*
* If it's a composite type, we need to check that it really is a
* free-standing composite type, and not a table's rowtype. We
* want people to use ALTER TABLE not ALTER TYPE for that case.
* free-standing composite type, and not a table's rowtype. We want people
* to use ALTER TABLE not ALTER TYPE for that case.
*/
if (typTup->typtype == TYPTYPE_COMPOSITE &&
get_rel_relkind(typTup->typrelid) != RELKIND_COMPOSITE_TYPE)
@ -2423,8 +2423,8 @@ AlterTypeOwner(List *names, Oid newOwnerId)
}
/*
* If it's a composite type, invoke ATExecChangeOwner so that we
* fix up the pg_class entry properly. That will call back to
* If it's a composite type, invoke ATExecChangeOwner so that we fix
* up the pg_class entry properly. That will call back to
* AlterTypeOwnerInternal to take care of the pg_type entry(s).
*/
if (typTup->typtype == TYPTYPE_COMPOSITE)
@ -2458,7 +2458,7 @@ AlterTypeOwner(List *names, Oid newOwnerId)
/*
* AlterTypeOwnerInternal - change type owner unconditionally
*
* This is currently only used to propagate ALTER TABLE/TYPE OWNER to a
* This is currently only used to propagate ALTER TABLE/TYPE OWNER to a
* table's rowtype or an array type, and to implement REASSIGN OWNED BY.
* It assumes the caller has done all needed checks. The function will
* automatically recurse to an array type if the type has one.
@ -2547,7 +2547,7 @@ AlterTypeNamespace(List *names, const char *newschema)
* Caller must have already checked privileges.
*
* The function automatically recurses to process the type's array type,
* if any. isImplicitArray should be TRUE only when doing this internal
* if any. isImplicitArray should be TRUE only when doing this internal
* recursion (outside callers must never try to move an array type directly).
*
* If errorOnTableType is TRUE, the function errors out if the type is


@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.360 2007/10/24 20:55:36 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.361 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -592,19 +592,19 @@ vacuum_set_xid_limits(int freeze_min_age, bool sharedRel,
/*
* We can always ignore processes running lazy vacuum. This is because we
* use these values only for deciding which tuples we must keep in the
* tables. Since lazy vacuum doesn't write its XID anywhere, it's
* safe to ignore it. In theory it could be problematic to ignore lazy
* vacuums on a full vacuum, but keep in mind that only one vacuum process
* can be working on a particular table at any time, and that each vacuum
* is always an independent transaction.
* tables. Since lazy vacuum doesn't write its XID anywhere, it's safe to
* ignore it. In theory it could be problematic to ignore lazy vacuums on
* a full vacuum, but keep in mind that only one vacuum process can be
* working on a particular table at any time, and that each vacuum is
* always an independent transaction.
*/
*oldestXmin = GetOldestXmin(sharedRel, true);
Assert(TransactionIdIsNormal(*oldestXmin));
/*
* Determine the minimum freeze age to use: as specified by the caller,
* or vacuum_freeze_min_age, but in any case not more than half
* Determine the minimum freeze age to use: as specified by the caller, or
* vacuum_freeze_min_age, but in any case not more than half
* autovacuum_freeze_max_age, so that autovacuums to prevent XID
* wraparound won't occur too frequently.
*/
@ -623,8 +623,8 @@ vacuum_set_xid_limits(int freeze_min_age, bool sharedRel,
/*
* If oldestXmin is very far back (in practice, more than
* autovacuum_freeze_max_age / 2 XIDs old), complain and force a
* minimum freeze age of zero.
* autovacuum_freeze_max_age / 2 XIDs old), complain and force a minimum
* freeze age of zero.
*/
safeLimit = ReadNewTransactionId() - autovacuum_freeze_max_age;
if (!TransactionIdIsNormal(safeLimit))
@ -758,7 +758,7 @@ vac_update_relstats(Oid relid, BlockNumber num_pages, double num_tuples,
* advance pg_database.datfrozenxid, also try to truncate pg_clog.
*
* We violate transaction semantics here by overwriting the database's
* existing pg_database tuple with the new value. This is reasonably
* existing pg_database tuple with the new value. This is reasonably
* safe since the new value is correct whether or not this transaction
* commits. As with vac_update_relstats, this avoids leaving dead tuples
* behind after a VACUUM.
@ -777,7 +777,7 @@ vac_update_datfrozenxid(void)
bool dirty = false;
/*
* Initialize the "min" calculation with RecentGlobalXmin. Any
* Initialize the "min" calculation with RecentGlobalXmin. Any
* not-yet-committed pg_class entries for new tables must have
* relfrozenxid at least this high, because any other open xact must have
* RecentXmin >= its PGPROC.xmin >= our RecentGlobalXmin; see
@ -848,8 +848,7 @@ vac_update_datfrozenxid(void)
/*
* If we were able to advance datfrozenxid, mark the flat-file copy of
* pg_database for update at commit, and see if we can truncate
* pg_clog.
* pg_database for update at commit, and see if we can truncate pg_clog.
*/
if (dirty)
{
@ -893,10 +892,10 @@ vac_truncate_clog(TransactionId frozenXID)
* inserted by CREATE DATABASE. Any such entry will have a copy of some
* existing DB's datfrozenxid, and that source DB cannot be ours because
* of the interlock against copying a DB containing an active backend.
* Hence the new entry will not reduce the minimum. Also, if two
* VACUUMs concurrently modify the datfrozenxid's of different databases,
* the worst possible outcome is that pg_clog is not truncated as
* aggressively as it could be.
* Hence the new entry will not reduce the minimum. Also, if two VACUUMs
* concurrently modify the datfrozenxid's of different databases, the
* worst possible outcome is that pg_clog is not truncated as aggressively
* as it could be.
*/
relation = heap_open(DatabaseRelationId, AccessShareLock);
@ -989,13 +988,13 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
*
* We can furthermore set the PROC_IN_VACUUM flag, which lets other
* concurrent VACUUMs know that they can ignore this one while
* determining their OldestXmin. (The reason we don't set it
* during a full VACUUM is exactly that we may have to run user-
* defined functions for functional indexes, and we want to make sure
* that if they use the snapshot set above, any tuples it requires
* can't get removed from other tables. An index function that
* depends on the contents of other tables is arguably broken, but we
* won't break it here by violating transaction semantics.)
* determining their OldestXmin. (The reason we don't set it during a
* full VACUUM is exactly that we may have to run user- defined
* functions for functional indexes, and we want to make sure that if
* they use the snapshot set above, any tuples it requires can't get
* removed from other tables. An index function that depends on the
* contents of other tables is arguably broken, but we won't break it
* here by violating transaction semantics.)
*
* Note: this flag remains set until CommitTransaction or
* AbortTransaction. We don't want to clear it until we reset
@ -1168,8 +1167,8 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
/*
* Flush any previous async-commit transactions. This does not guarantee
* that we will be able to set hint bits for tuples they inserted, but
* it improves the probability, especially in simple sequential-commands
* that we will be able to set hint bits for tuples they inserted, but it
* improves the probability, especially in simple sequential-commands
* cases. See scan_heap() and repair_frag() for more about this.
*/
XLogAsyncCommitFlush();
@ -1319,10 +1318,11 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
* dirty. To ensure that invalid data doesn't get written to disk, we
* must take exclusive buffer lock wherever we potentially modify
* pages. In fact, we insist on cleanup lock so that we can safely
* call heap_page_prune(). (This might be overkill, since the bgwriter
* pays no attention to individual tuples, but on the other hand it's
* unlikely that the bgwriter has this particular page pinned at this
* instant. So violating the coding rule would buy us little anyway.)
* call heap_page_prune(). (This might be overkill, since the
* bgwriter pays no attention to individual tuples, but on the other
* hand it's unlikely that the bgwriter has this particular page
* pinned at this instant. So violating the coding rule would buy us
* little anyway.)
*/
LockBufferForCleanup(buf);
@ -1365,7 +1365,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
continue;
}
/*
/*
* Prune all HOT-update chains in this page.
*
* We use the redirect_move option so that redirecting line pointers
@ -1377,8 +1377,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
true, false);
/*
* Now scan the page to collect vacuumable items and check for
* tuples requiring freezing.
* Now scan the page to collect vacuumable items and check for tuples
* requiring freezing.
*/
nfrozen = 0;
notup = true;
@ -1393,9 +1393,9 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
/*
* Collect un-used items too - it's possible to have indexes
* pointing here after crash. (That's an ancient comment and
* is likely obsolete with WAL, but we might as well continue
* to check for such problems.)
* pointing here after crash. (That's an ancient comment and is
* likely obsolete with WAL, but we might as well continue to
* check for such problems.)
*/
if (!ItemIdIsUsed(itemid))
{
@ -1406,9 +1406,9 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
/*
* DEAD item pointers are to be vacuumed normally; but we don't
* count them in tups_vacuumed, else we'd be double-counting
* (at least in the common case where heap_page_prune() just
* freed up a non-HOT tuple).
* count them in tups_vacuumed, else we'd be double-counting (at
* least in the common case where heap_page_prune() just freed up
* a non-HOT tuple).
*/
if (ItemIdIsDead(itemid))
{
@ -1433,12 +1433,13 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
!OidIsValid(HeapTupleGetOid(&tuple)))
elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid",
relname, blkno, offnum);
/*
* The shrinkage phase of VACUUM FULL requires that all
* live tuples have XMIN_COMMITTED set --- see comments in
* repair_frag()'s walk-along-page loop. Use of async
* commit may prevent HeapTupleSatisfiesVacuum from
* setting the bit for a recently committed tuple. Rather
* setting the bit for a recently committed tuple. Rather
* than trying to handle this corner case, we just give up
* and don't shrink.
*/
@ -1448,30 +1449,31 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
ereport(LOG,
(errmsg("relation \"%s\" TID %u/%u: XMIN_COMMITTED not set for transaction %u --- cannot shrink relation",
relname, blkno, offnum,
HeapTupleHeaderGetXmin(tuple.t_data))));
HeapTupleHeaderGetXmin(tuple.t_data))));
do_shrinking = false;
}
break;
case HEAPTUPLE_DEAD:
/*
* Ordinarily, DEAD tuples would have been removed by
* heap_page_prune(), but it's possible that the tuple
* state changed since heap_page_prune() looked. In
* particular an INSERT_IN_PROGRESS tuple could have
* changed to DEAD if the inserter aborted. So this
* cannot be considered an error condition, though it
* does suggest that someone released a lock early.
* cannot be considered an error condition, though it does
* suggest that someone released a lock early.
*
* If the tuple is HOT-updated then it must only be
* removed by a prune operation; so we keep it as if it
* were RECENTLY_DEAD, and abandon shrinking. (XXX is it
* worth trying to make the shrinking code smart enough
* to handle this? It's an unusual corner case.)
* worth trying to make the shrinking code smart enough to
* handle this? It's an unusual corner case.)
*
* DEAD heap-only tuples can safely be removed if they
* aren't themselves HOT-updated, although this is a bit
* inefficient since we'll uselessly try to remove
* index entries for them.
* inefficient since we'll uselessly try to remove index
* entries for them.
*/
if (HeapTupleIsHotUpdated(&tuple))
{
@ -1484,7 +1486,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
}
else
{
tupgone = true; /* we can delete the tuple */
tupgone = true; /* we can delete the tuple */
/*
* We need not require XMIN_COMMITTED or
* XMAX_COMMITTED to be set, since we will remove the
@ -1502,8 +1505,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
nkeep += 1;
/*
* As with the LIVE case, shrinkage requires XMIN_COMMITTED
* to be set.
* As with the LIVE case, shrinkage requires
* XMIN_COMMITTED to be set.
*/
if (do_shrinking &&
!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
@ -1511,7 +1514,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
ereport(LOG,
(errmsg("relation \"%s\" TID %u/%u: XMIN_COMMITTED not set for transaction %u --- cannot shrink relation",
relname, blkno, offnum,
HeapTupleHeaderGetXmin(tuple.t_data))));
HeapTupleHeaderGetXmin(tuple.t_data))));
do_shrinking = false;
}
@ -1542,15 +1545,15 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
* This should not happen, since we hold exclusive lock on
* the relation; shouldn't we raise an error? (Actually,
* it can happen in system catalogs, since we tend to
* release write lock before commit there.) As above,
* we can't apply repair_frag() if the tuple state is
* release write lock before commit there.) As above, we
* can't apply repair_frag() if the tuple state is
* uncertain.
*/
if (do_shrinking)
ereport(LOG,
(errmsg("relation \"%s\" TID %u/%u: InsertTransactionInProgress %u --- cannot shrink relation",
relname, blkno, offnum,
HeapTupleHeaderGetXmin(tuple.t_data))));
HeapTupleHeaderGetXmin(tuple.t_data))));
do_shrinking = false;
break;
case HEAPTUPLE_DELETE_IN_PROGRESS:
@ -1559,15 +1562,15 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
* This should not happen, since we hold exclusive lock on
* the relation; shouldn't we raise an error? (Actually,
* it can happen in system catalogs, since we tend to
* release write lock before commit there.) As above,
* we can't apply repair_frag() if the tuple state is
* release write lock before commit there.) As above, we
* can't apply repair_frag() if the tuple state is
* uncertain.
*/
if (do_shrinking)
ereport(LOG,
(errmsg("relation \"%s\" TID %u/%u: DeleteTransactionInProgress %u --- cannot shrink relation",
relname, blkno, offnum,
HeapTupleHeaderGetXmax(tuple.t_data))));
HeapTupleHeaderGetXmax(tuple.t_data))));
do_shrinking = false;
break;
default:
@ -1615,8 +1618,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
max_tlen = tuple.t_len;
/*
* Each non-removable tuple must be checked to see if it
* needs freezing.
* Each non-removable tuple must be checked to see if it needs
* freezing.
*/
if (heap_freeze_tuple(tuple.t_data, FreezeLimit,
InvalidBuffer))
@ -1996,11 +1999,12 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (i >= vacpage->offsets_free) /* not found */
{
vacpage->offsets[vacpage->offsets_free++] = offnum;
/*
* If this is not a heap-only tuple, there must be an
* index entry for this item which will be removed in
* the index cleanup. Decrement the keep_indexed_tuples
* count to remember this.
* the index cleanup. Decrement the
* keep_indexed_tuples count to remember this.
*/
if (!HeapTupleHeaderIsHeapOnly(tuple.t_data))
keep_indexed_tuples--;
@ -2010,11 +2014,12 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
else
{
vacpage->offsets[vacpage->offsets_free++] = offnum;
/*
* If this is not a heap-only tuple, there must be an
* index entry for this item which will be removed in
* the index cleanup. Decrement the keep_indexed_tuples
* count to remember this.
* index entry for this item which will be removed in the
* index cleanup. Decrement the keep_indexed_tuples count
* to remember this.
*/
if (!HeapTupleHeaderIsHeapOnly(tuple.t_data))
keep_indexed_tuples--;
@ -2051,10 +2056,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* Also, because we distinguish DEAD and RECENTLY_DEAD tuples
* using OldestXmin, which is a rather coarse test, it is quite
* possible to have an update chain in which a tuple we think is
* RECENTLY_DEAD links forward to one that is definitely DEAD.
* In such a case the RECENTLY_DEAD tuple must actually be dead,
* but it seems too complicated to try to make VACUUM remove it.
* We treat each contiguous set of RECENTLY_DEAD tuples as a
* RECENTLY_DEAD links forward to one that is definitely DEAD. In
* such a case the RECENTLY_DEAD tuple must actually be dead, but
* it seems too complicated to try to make VACUUM remove it. We
* treat each contiguous set of RECENTLY_DEAD tuples as a
* separately movable chain, ignoring any intervening DEAD ones.
*/
if (((tuple.t_data->t_infomask & HEAP_UPDATED) &&
@ -2096,11 +2101,11 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* If this tuple is in the begin/middle of the chain then we
* have to move to the end of chain. As with any t_ctid
* chase, we have to verify that each new tuple is really the
* descendant of the tuple we came from; however, here we
* need even more than the normal amount of paranoia.
* If t_ctid links forward to a tuple determined to be DEAD,
* then depending on where that tuple is, it might already
* have been removed, and perhaps even replaced by a MOVED_IN
* descendant of the tuple we came from; however, here we need
* even more than the normal amount of paranoia. If t_ctid
* links forward to a tuple determined to be DEAD, then
* depending on where that tuple is, it might already have
* been removed, and perhaps even replaced by a MOVED_IN
* tuple. We don't want to include any DEAD tuples in the
* chain, so we have to recheck HeapTupleSatisfiesVacuum.
*/
@ -2116,7 +2121,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
OffsetNumber nextOffnum;
ItemId nextItemid;
HeapTupleHeader nextTdata;
HTSV_Result nextTstatus;
HTSV_Result nextTstatus;
nextTid = tp.t_data->t_ctid;
priorXmax = HeapTupleHeaderGetXmax(tp.t_data);
@ -2148,10 +2153,11 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
ReleaseBuffer(nextBuf);
break;
}
/*
* Must check for DEAD or MOVED_IN tuple, too. This
* could potentially update hint bits, so we'd better
* hold the buffer content lock.
* Must check for DEAD or MOVED_IN tuple, too. This could
* potentially update hint bits, so we'd better hold the
* buffer content lock.
*/
LockBuffer(nextBuf, BUFFER_LOCK_SHARE);
nextTstatus = HeapTupleSatisfiesVacuum(nextTdata,
@ -2266,7 +2272,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
}
tp.t_self = vtlp->this_tid;
Pbuf = ReadBufferWithStrategy(onerel,
ItemPointerGetBlockNumber(&(tp.t_self)),
ItemPointerGetBlockNumber(&(tp.t_self)),
vac_strategy);
Ppage = BufferGetPage(Pbuf);
Pitemid = PageGetItemId(Ppage,
@ -2350,7 +2356,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/* Get page to move from */
tuple.t_self = vtmove[ti].tid;
Cbuf = ReadBufferWithStrategy(onerel,
ItemPointerGetBlockNumber(&(tuple.t_self)),
ItemPointerGetBlockNumber(&(tuple.t_self)),
vac_strategy);
/* Get page to move to */
@ -2375,10 +2381,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
&ec, &Ctid, vtmove[ti].cleanVpd);
/*
* If the tuple we are moving is a heap-only tuple,
* this move will generate an additional index entry,
* so increment the rel_indexed_tuples count.
*/
* If the tuple we are moving is a heap-only tuple, this
* move will generate an additional index entry, so
* increment the rel_indexed_tuples count.
*/
if (HeapTupleHeaderIsHeapOnly(tuple.t_data))
vacrelstats->rel_indexed_tuples++;
@ -2398,22 +2404,22 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* When we move tuple chains, we may need to move
* tuples from a block that we haven't yet scanned in
* the outer walk-along-the-relation loop. Note that we
* can't be moving a tuple from a block that we have
* already scanned because if such a tuple exists, then
* we must have moved the chain along with that tuple
* when we scanned that block. IOW the test of
* (Cbuf != buf) guarantees that the tuple we are
* looking at right now is in a block which is yet to
* be scanned.
* the outer walk-along-the-relation loop. Note that
* we can't be moving a tuple from a block that we
* have already scanned because if such a tuple
* exists, then we must have moved the chain along
* with that tuple when we scanned that block. IOW the
* test of (Cbuf != buf) guarantees that the tuple we
* are looking at right now is in a block which is yet
* to be scanned.
*
* We maintain two counters to correctly count the
* moved-off tuples from blocks that are not yet
* scanned (keep_tuples) and how many of them have
* index pointers (keep_indexed_tuples). The main
* reason to track the latter is to help verify
* that indexes have the expected number of entries
* when all the dust settles.
* reason to track the latter is to help verify that
* indexes have the expected number of entries when
* all the dust settles.
*/
if (!HeapTupleHeaderIsHeapOnly(tuple.t_data))
keep_indexed_tuples++;
@ -2467,9 +2473,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
dst_buffer, dst_page, dst_vacpage, &ec);
/*
* If the tuple we are moving is a heap-only tuple,
* this move will generate an additional index entry,
* so increment the rel_indexed_tuples count.
* If the tuple we are moving is a heap-only tuple, this move will
* generate an additional index entry, so increment the
* rel_indexed_tuples count.
*/
if (HeapTupleHeaderIsHeapOnly(tuple.t_data))
vacrelstats->rel_indexed_tuples++;
@ -2538,11 +2544,12 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
{
vacpage->offsets[vacpage->offsets_free++] = off;
Assert(keep_tuples > 0);
/*
* If this is not a heap-only tuple, there must be an
* index entry for this item which will be removed in
* the index cleanup. Decrement the keep_indexed_tuples
* count to remember this.
* the index cleanup. Decrement the
* keep_indexed_tuples count to remember this.
*/
if (!HeapTupleHeaderIsHeapOnly(htup))
keep_indexed_tuples--;
@ -2594,14 +2601,14 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* exclusive access to the relation. However, that would require a
* lot of extra code to close and re-open the relation, indexes, etc.
* For now, a quick hack: record status of current transaction as
* committed, and continue. We force the commit to be synchronous
* so that it's down to disk before we truncate. (Note: tqual.c
* knows that VACUUM FULL always uses sync commit, too.) The
* transaction continues to be shown as running in the ProcArray.
* committed, and continue. We force the commit to be synchronous so
* that it's down to disk before we truncate. (Note: tqual.c knows
* that VACUUM FULL always uses sync commit, too.) The transaction
* continues to be shown as running in the ProcArray.
*
* XXX This desperately needs to be revisited. Any failure after
* this point will result in a PANIC "cannot abort transaction nnn,
* it was already committed"!
* XXX This desperately needs to be revisited. Any failure after this
* point will result in a PANIC "cannot abort transaction nnn, it was
* already committed"!
*/
ForceSyncCommit();
(void) RecordTransactionCommit();


@ -13,7 +13,7 @@
* We are willing to use at most maintenance_work_mem memory space to keep
* track of dead tuples. We initially allocate an array of TIDs of that size,
* with an upper limit that depends on table size (this limit ensures we don't
* allocate a huge area uselessly for vacuuming small tables). If the array
* allocate a huge area uselessly for vacuuming small tables). If the array
* threatens to overflow, we suspend the heap scan phase and perform a pass of
* index cleanup and page compaction, then resume the heap scan with an empty
* TID array.
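/*
 * Illustrative aside: roughly how a dead-TID array could be sized from
 * maintenance_work_mem with a cap based on table size, as described
 * above.  The constants and function name are hypothetical; this is not
 * the actual lazy_space_alloc() logic.
 */
#include <stdio.h>

#define KILOBYTE		  1024L
#define TID_BYTES		  6L	/* size of one item pointer */
#define MAX_DEAD_PER_PAGE 291L	/* illustrative per-page ceiling */

static long
dead_tid_slots(long maintenance_work_mem_kb, long rel_pages)
{
	long		budget = (maintenance_work_mem_kb * KILOBYTE) / TID_BYTES;
	long		worst_case = rel_pages * MAX_DEAD_PER_PAGE;

	/* never allocate more slots than the table could possibly need */
	return (worst_case < budget) ? worst_case : budget;
}

int
main(void)
{
	/* small table: the table-size cap wins, so little memory is wasted */
	printf("10 pages:      %ld slots\n", dead_tid_slots(16384, 10));
	/* big table: the memory budget wins; overflow forces an index pass */
	printf("1000000 pages: %ld slots\n", dead_tid_slots(16384, 1000000));
	return 0;
}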
@ -38,7 +38,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.101 2007/09/26 20:16:28 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.102 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -157,7 +157,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
int nindexes;
BlockNumber possibly_freeable;
PGRUsage ru0;
TimestampTz starttime = 0;
TimestampTz starttime = 0;
pg_rusage_init(&ru0);
@ -212,10 +212,10 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
(errmsg("relation \"%s.%s\" contains more than \"max_fsm_pages\" pages with useful free space",
get_namespace_name(RelationGetNamespace(onerel)),
RelationGetRelationName(onerel)),
errhint((vacrelstats->tot_free_pages > vacrelstats->rel_pages * 0.20 ?
/* Only suggest VACUUM FULL if 20% free */
"Consider using VACUUM FULL on this relation or increasing the configuration parameter \"max_fsm_pages\"." :
"Consider increasing the configuration parameter \"max_fsm_pages\"."))));
errhint((vacrelstats->tot_free_pages > vacrelstats->rel_pages * 0.20 ?
/* Only suggest VACUUM FULL if 20% free */
"Consider using VACUUM FULL on this relation or increasing the configuration parameter \"max_fsm_pages\"." :
"Consider increasing the configuration parameter \"max_fsm_pages\"."))));
/* Update statistics in pg_class */
vac_update_relstats(RelationGetRelid(onerel),
@ -243,8 +243,8 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
get_namespace_name(RelationGetNamespace(onerel)),
RelationGetRelationName(onerel),
vacrelstats->num_index_scans,
vacrelstats->pages_removed, vacrelstats->rel_pages,
vacrelstats->tuples_deleted, vacrelstats->rel_tuples,
vacrelstats->pages_removed, vacrelstats->rel_pages,
vacrelstats->tuples_deleted, vacrelstats->rel_tuples,
pg_rusage_show(&ru0))));
}
}
@ -350,9 +350,9 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* page that someone has just added to the relation and not yet
* been able to initialize (see RelationGetBufferForTuple). To
* protect against that, release the buffer lock, grab the
* relation extension lock momentarily, and re-lock the buffer.
* If the page is still uninitialized by then, it must be left
* over from a crashed backend, and we can initialize it.
* relation extension lock momentarily, and re-lock the buffer. If
* the page is still uninitialized by then, it must be left over
* from a crashed backend, and we can initialize it.
*
* We don't really need the relation lock when this is a new or
* temp relation, but it's probably not worth the code space to
@ -389,7 +389,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
continue;
}
/*
/*
* Prune all HOT-update chains in this page.
*
* We count tuples removed by the pruning step as removed by VACUUM.
@ -398,8 +398,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
false, false);
/*
* Now scan the page to collect vacuumable items and check for
* tuples requiring freezing.
* Now scan the page to collect vacuumable items and check for tuples
* requiring freezing.
*/
nfrozen = 0;
hastup = false;
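/*
 * Illustrative aside: the shape of the per-page item scan described
 * above, with hypothetical item states standing in for ItemIdIsUsed(),
 * ItemIdIsRedirected() and ItemIdIsDead().  Not the real page format.
 */
#include <stdio.h>

typedef enum
{
	ITEM_UNUSED,
	ITEM_REDIRECT,
	ITEM_DEAD,
	ITEM_NORMAL
}	ItemState;

int
main(void)
{
	ItemState	page[] = {ITEM_NORMAL, ITEM_DEAD, ITEM_REDIRECT, ITEM_UNUSED, ITEM_DEAD};
	int			nitems = (int) (sizeof(page) / sizeof(page[0]));
	int			ndead = 0;			/* collected into the dead-TID array */
	int			tups_vacuumed = 0;	/* deliberately NOT bumped for DEAD items */
	int			hastup = 0;			/* page is not truncatable if set */
	int			i;

	for (i = 0; i < nitems; i++)
	{
		switch (page[i])
		{
			case ITEM_UNUSED:
				continue;		/* nothing to do */
			case ITEM_REDIRECT:
				hastup = 1;		/* mustn't be touched, page must stay */
				continue;
			case ITEM_DEAD:
				ndead++;		/* vacuum it, but don't double-count it */
				continue;
			case ITEM_NORMAL:
				hastup = 1;		/* something living keeps the page around */
				break;
		}
		/* a NORMAL item would go on to the visibility and freeze checks */
	}

	printf("dead=%d vacuumed=%d hastup=%d\n", ndead, tups_vacuumed, hastup);
	return 0;
}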
@ -421,19 +421,19 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
}
/* Redirect items mustn't be touched */
if (ItemIdIsRedirected(itemid))
{
if (ItemIdIsRedirected(itemid))
{
hastup = true; /* this page won't be truncatable */
continue;
}
continue;
}
ItemPointerSet(&(tuple.t_self), blkno, offnum);
ItemPointerSet(&(tuple.t_self), blkno, offnum);
/*
* DEAD item pointers are to be vacuumed normally; but we don't
* count them in tups_vacuumed, else we'd be double-counting
* (at least in the common case where heap_page_prune() just
* freed up a non-HOT tuple).
* count them in tups_vacuumed, else we'd be double-counting (at
* least in the common case where heap_page_prune() just freed up
* a non-HOT tuple).
*/
if (ItemIdIsDead(itemid))
{
@ -451,6 +451,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
switch (HeapTupleSatisfiesVacuum(tuple.t_data, OldestXmin, buf))
{
case HEAPTUPLE_DEAD:
/*
* Ordinarily, DEAD tuples would have been removed by
* heap_page_prune(), but it's possible that the tuple
@ -460,17 +461,17 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* cannot be considered an error condition.
*
* If the tuple is HOT-updated then it must only be
* removed by a prune operation; so we keep it just as
* if it were RECENTLY_DEAD. Also, if it's a heap-only
* tuple, we choose to keep it, because it'll be a
* lot cheaper to get rid of it in the next pruning pass
* than to treat it like an indexed tuple.
* removed by a prune operation; so we keep it just as if
* it were RECENTLY_DEAD. Also, if it's a heap-only
* tuple, we choose to keep it, because it'll be a lot
* cheaper to get rid of it in the next pruning pass than
* to treat it like an indexed tuple.
*/
if (HeapTupleIsHotUpdated(&tuple) ||
HeapTupleIsHeapOnly(&tuple))
nkeep += 1;
else
tupgone = true; /* we can delete the tuple */
tupgone = true; /* we can delete the tuple */
break;
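/*
 * Illustrative aside: the keep-or-delete decision for a DEAD tuple made
 * just above, pulled out into a tiny hypothetical helper.  The flags
 * stand in for HeapTupleIsHotUpdated() and HeapTupleIsHeapOnly().
 */
#include <stdbool.h>
#include <stdio.h>

static bool
keep_dead_tuple(bool hot_updated, bool heap_only)
{
	/*
	 * A DEAD tuple that is part of a HOT chain, or is heap-only, is kept
	 * (counted in nkeep) and left for a later pruning pass; anything else
	 * can be removed by this VACUUM directly.
	 */
	return hot_updated || heap_only;
}

int
main(void)
{
	printf("plain dead tuple kept?     %d\n", keep_dead_tuple(false, false));
	printf("heap-only dead tuple kept? %d\n", keep_dead_tuple(false, true));
	return 0;
}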
case HEAPTUPLE_LIVE:
/* Tuple is good --- but let's do some validity checks */
@ -509,8 +510,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
hastup = true;
/*
* Each non-removable tuple must be checked to see if it
* needs freezing. Note we already have exclusive buffer lock.
* Each non-removable tuple must be checked to see if it needs
* freezing. Note we already have exclusive buffer lock.
*/
if (heap_freeze_tuple(tuple.t_data, FreezeLimit,
InvalidBuffer))
@ -864,11 +865,11 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
RelationTruncate(onerel, new_rel_pages);
/*
* Note: once we have truncated, we *must* keep the exclusive lock
* until commit. The sinval message that will be sent at commit
* (as a result of vac_update_relstats()) must be received by other
* backends, to cause them to reset their rd_targblock values, before
* they can safely access the table again.
* Note: once we have truncated, we *must* keep the exclusive lock until
* commit. The sinval message that will be sent at commit (as a result of
* vac_update_relstats()) must be received by other backends, to cause
* them to reset their rd_targblock values, before they can safely access
* the table again.
*/
/*
@ -933,9 +934,8 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
/*
* We don't insert a vacuum delay point here, because we have an
* exclusive lock on the table which we want to hold for as short
* a time as possible. We still need to check for interrupts
* however.
* exclusive lock on the table which we want to hold for as short a
* time as possible. We still need to check for interrupts however.
*/
CHECK_FOR_INTERRUPTS();
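/*
 * Illustrative aside: checking for cancellation without sleeping, as the
 * comment above requires while the exclusive lock is held.  The flag and
 * handler are hypothetical stand-ins for the backend's interrupt machinery.
 */
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t interrupt_pending = 0;

static void
handle_sigint(int signo)
{
	(void) signo;
	interrupt_pending = 1;
}

int
main(void)
{
	long		blkno;

	signal(SIGINT, handle_sigint);

	/*
	 * Walk backwards over the pages we hope to truncate.  No delay point
	 * in the loop, but a pending cancel is still honored promptly.
	 */
	for (blkno = 1000000; blkno > 0; blkno--)
	{
		if (interrupt_pending)	/* the moral equivalent of CHECK_FOR_INTERRUPTS() */
		{
			fprintf(stderr, "canceling scan at block %ld\n", blkno);
			return 1;
		}
		/* ... inspect the page for remaining tuples here ... */
	}
	return 0;
}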


@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.121 2007/08/04 01:26:53 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.122 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -463,16 +463,16 @@ assign_log_timezone(const char *value, bool doit, GucSource source)
{
/*
* UNKNOWN is the value shown as the "default" for log_timezone in
* guc.c. We interpret it as being a complete no-op; we don't
* change the timezone setting. Note that if there is a known
* timezone setting, we will return that name rather than UNKNOWN
* as the canonical spelling.
* guc.c. We interpret it as being a complete no-op; we don't change
* the timezone setting. Note that if there is a known timezone
* setting, we will return that name rather than UNKNOWN as the
* canonical spelling.
*
* During GUC initialization, since the timezone library isn't set
* up yet, pg_get_timezone_name will return NULL and we will leave
* the setting as UNKNOWN. If this isn't overridden from the
* config file then pg_timezone_initialize() will eventually
* select a default value from the environment.
* During GUC initialization, since the timezone library isn't set up
* yet, pg_get_timezone_name will return NULL and we will leave the
* setting as UNKNOWN. If this isn't overridden from the config file
* then pg_timezone_initialize() will eventually select a default
* value from the environment.
*/
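/*
 * Illustrative aside: the "UNKNOWN means no-op, but report the canonical
 * name if one is known" behaviour described above, as a hypothetical
 * standalone hook.  Not the real GUC assign-hook signature.
 */
#include <stdio.h>
#include <string.h>

/* pretend lookup: the currently selected zone, or NULL early in startup */
static const char *
current_zone_name(void)
{
	return "Europe/Vienna";
}

static const char *
assign_zone(const char *value, int doit)
{
	if (strcmp(value, "UNKNOWN") == 0)
	{
		const char *known = current_zone_name();

		(void) doit;			/* nothing to change in either case */
		return known ? known : value;	/* canonical spelling if we have one */
	}
	/* a real zone name would be validated and, if doit, applied here */
	return value;
}

int
main(void)
{
	printf("canonical: %s\n", assign_zone("UNKNOWN", 1));
	return 0;
}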
if (doit)
{


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.102 2007/08/27 03:36:08 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.103 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -273,6 +273,7 @@ DefineViewRules(Oid viewOid, Query *viewParse, bool replace)
true,
replace,
list_make1(viewParse));
/*
* Someday: automatic ON INSERT, etc
*/
@ -356,8 +357,8 @@ DefineView(ViewStmt *stmt, const char *queryString)
RangeVar *view;
/*
* Run parse analysis to convert the raw parse tree to a Query. Note
* this also acquires sufficient locks on the source table(s).
* Run parse analysis to convert the raw parse tree to a Query. Note this
* also acquires sufficient locks on the source table(s).
*
* Since parse analysis scribbles on its input, copy the raw parse tree;
* this ensures we don't corrupt a prepared statement, for example.
@ -404,14 +405,14 @@ DefineView(ViewStmt *stmt, const char *queryString)
/*
* If the user didn't explicitly ask for a temporary view, check whether
* we need one implicitly. We allow TEMP to be inserted automatically
* as long as the CREATE command is consistent with that --- no explicit
* we need one implicitly. We allow TEMP to be inserted automatically as
* long as the CREATE command is consistent with that --- no explicit
* schema name.
*/
view = stmt->view;
if (!view->istemp && isViewOnTempTable(viewParse))
{
view = copyObject(view); /* don't corrupt original command */
view = copyObject(view); /* don't corrupt original command */
view->istemp = true;
ereport(NOTICE,
(errmsg("view \"%s\" will be a temporary view",