Mirror of https://github.com/postgres/postgres.git
pgindent run for 9.4
This includes removing tabs after periods in C comments, which was applied to back branches, so this change should not affect backpatching.
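As an illustration only (a made-up comment line, not one taken from this commit, with the removed tab written as "<TAB>" for visibility), the whitespace change inside a C comment looks like:

- * Sort the array first.<TAB>Then scan it for duplicates.
+ * Sort the array first.  Then scan it for duplicates.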
@@ -296,7 +296,7 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters,
  *
  * transtype can't be a pseudo-type, since we need to be able to store
  * values of the transtype. However, we can allow polymorphic transtype
- * in some cases (AggregateCreate will check).	Also, we allow "internal"
+ * in some cases (AggregateCreate will check). Also, we allow "internal"
  * for functions that want to pass pointers to private data structures;
  * but allow that only to superusers, since you could crash the system (or
  * worse) by connecting up incompatible internal-using functions in an
@@ -317,7 +317,7 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters,
 }

 /*
- * If a moving-aggregate transtype is specified, look that up.	Same
+ * If a moving-aggregate transtype is specified, look that up. Same
  * restrictions as for transtype.
  */
 if (mtransType)
@@ -296,7 +296,7 @@ AlterObjectRename_internal(Relation rel, Oid objectId, const char *new_name)
 }

 /*
- * Executes an ALTER OBJECT / RENAME TO statement.	Based on the object
+ * Executes an ALTER OBJECT / RENAME TO statement. Based on the object
  * type, the function appropriate to that type is executed.
  */
 Oid
@@ -409,7 +409,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,

 /*
  * Open all indexes of the relation, and see if there are any analyzable
- * columns in the indexes.	We do not analyze index columns if there was
+ * columns in the indexes. We do not analyze index columns if there was
  * an explicit column list in the ANALYZE command, however. If we are
  * doing a recursive scan, we don't want to touch the parent's indexes at
  * all.
@@ -466,7 +466,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,

 /*
  * Determine how many rows we need to sample, using the worst case from
- * all analyzable columns.	We use a lower bound of 100 rows to avoid
+ * all analyzable columns. We use a lower bound of 100 rows to avoid
  * possible overflow in Vitter's algorithm. (Note: that will also be the
  * target in the corner case where there are no analyzable columns.)
  */
@@ -501,7 +501,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
 &totalrows, &totaldeadrows);

 /*
- * Compute the statistics.	Temporary results during the calculations for
+ * Compute the statistics. Temporary results during the calculations for
  * each column are stored in a child context. The calc routines are
  * responsible to make sure that whatever they store into the VacAttrStats
  * structure is allocated in anl_context.
@@ -558,7 +558,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,

 /*
  * Emit the completed stats rows into pg_statistic, replacing any
- * previous statistics for the target columns.	(If there are stats in
+ * previous statistics for the target columns. (If there are stats in
  * pg_statistic for columns we didn't process, we leave them alone.)
  */
 update_attstats(RelationGetRelid(onerel), inh,
@@ -610,7 +610,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
 }

 /*
- * Report ANALYZE to the stats collector, too.	However, if doing
+ * Report ANALYZE to the stats collector, too. However, if doing
  * inherited stats we shouldn't report, because the stats collector only
  * tracks per-table stats.
  */
@@ -872,7 +872,7 @@ examine_attribute(Relation onerel, int attnum, Node *index_expr)
 return NULL;

 /*
- * Create the VacAttrStats struct.	Note that we only have a copy of the
+ * Create the VacAttrStats struct. Note that we only have a copy of the
  * fixed fields of the pg_attribute tuple.
  */
 stats = (VacAttrStats *) palloc0(sizeof(VacAttrStats));
@@ -882,7 +882,7 @@ examine_attribute(Relation onerel, int attnum, Node *index_expr)
 /*
  * When analyzing an expression index, believe the expression tree's type
  * not the column datatype --- the latter might be the opckeytype storage
- * type of the opclass, which is not interesting for our purposes.	(Note:
+ * type of the opclass, which is not interesting for our purposes. (Note:
  * if we did anything with non-expression index columns, we'd need to
  * figure out where to get the correct type info from, but for now that's
  * not a problem.) It's not clear whether anyone will care about the
@@ -921,7 +921,7 @@ examine_attribute(Relation onerel, int attnum, Node *index_expr)
 }

 /*
- * Call the type-specific typanalyze function.	If none is specified, use
+ * Call the type-specific typanalyze function. If none is specified, use
  * std_typanalyze().
  */
 if (OidIsValid(stats->attrtype->typanalyze))
@@ -997,7 +997,7 @@ BlockSampler_Next(BlockSampler bs)
  * If we are to skip, we should advance t (hence decrease K), and
  * repeat the same probabilistic test for the next block. The naive
  * implementation thus requires an anl_random_fract() call for each block
- * number.	But we can reduce this to one anl_random_fract() call per
+ * number. But we can reduce this to one anl_random_fract() call per
  * selected block, by noting that each time the while-test succeeds,
  * we can reinterpret V as a uniform random number in the range 0 to p.
  * Therefore, instead of choosing a new V, we just adjust p to be
@@ -1127,7 +1127,7 @@ acquire_sample_rows(Relation onerel, int elevel,
 /*
  * We ignore unused and redirect line pointers. DEAD line
  * pointers should be counted as dead, because we need vacuum to
- * run to get rid of them.	Note that this rule agrees with the
+ * run to get rid of them. Note that this rule agrees with the
  * way that heap_page_prune() counts things.
  */
 if (!ItemIdIsNormal(itemid))
@@ -1173,7 +1173,7 @@ acquire_sample_rows(Relation onerel, int elevel,
  * is the safer option.
  *
  * A special case is that the inserting transaction might
- * be our own.	In this case we should count and sample
+ * be our own. In this case we should count and sample
  * the row, to accommodate users who load a table and
  * analyze it in one transaction. (pgstat_report_analyze
  * has to adjust the numbers we send to the stats
@@ -1215,7 +1215,7 @@ acquire_sample_rows(Relation onerel, int elevel,
 /*
  * The first targrows sample rows are simply copied into the
  * reservoir. Then we start replacing tuples in the sample
- * until we reach the end of the relation.	This algorithm is
+ * until we reach the end of the relation. This algorithm is
  * from Jeff Vitter's paper (see full citation below). It
  * works by repeatedly computing the number of tuples to skip
  * before selecting a tuple, which replaces a randomly chosen
@@ -1274,7 +1274,7 @@ acquire_sample_rows(Relation onerel, int elevel,
 qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);

 /*
- * Estimate total numbers of rows in relation.	For live rows, use
+ * Estimate total numbers of rows in relation. For live rows, use
  * vac_estimate_reltuples; for dead rows, we have no source of old
  * information, so we have to assume the density is the same in unseen
  * pages as in the pages we scanned.
@@ -1597,7 +1597,7 @@ acquire_inherited_sample_rows(Relation onerel, int elevel,
  * Statistics are stored in several places: the pg_class row for the
  * relation has stats about the whole relation, and there is a
  * pg_statistic row for each (non-system) attribute that has ever
- * been analyzed.	The pg_class values are updated by VACUUM, not here.
+ * been analyzed. The pg_class values are updated by VACUUM, not here.
  *
  * pg_statistic rows are just added or updated normally. This means
  * that pg_statistic will probably contain some deleted rows at the
@@ -2001,7 +2001,7 @@ compute_minimal_stats(VacAttrStatsP stats,
 /*
  * If the value is toasted, we want to detoast it just once to
  * avoid repeated detoastings and resultant excess memory usage
- * during the comparisons.	Also, check to see if the value is
+ * during the comparisons. Also, check to see if the value is
  * excessively wide, and if so don't detoast at all --- just
  * ignore the value.
  */
@@ -2121,7 +2121,7 @@ compute_minimal_stats(VacAttrStatsP stats,
  * We assume (not very reliably!) that all the multiply-occurring
  * values are reflected in the final track[] list, and the other
  * nonnull values all appeared but once. (XXX this usually
- * results in a drastic overestimate of ndistinct.	Can we do
+ * results in a drastic overestimate of ndistinct. Can we do
  * any better?)
  *----------
  */
@@ -2158,7 +2158,7 @@ compute_minimal_stats(VacAttrStatsP stats,
  * Decide how many values are worth storing as most-common values. If
  * we are able to generate a complete MCV list (all the values in the
  * sample will fit, and we think these are all the ones in the table),
- * then do so.	Otherwise, store only those values that are
+ * then do so. Otherwise, store only those values that are
  * significantly more common than the (estimated) average. We set the
  * threshold rather arbitrarily at 25% more than average, with at
  * least 2 instances in the sample.
@@ -2326,7 +2326,7 @@ compute_scalar_stats(VacAttrStatsP stats,
 /*
  * If the value is toasted, we want to detoast it just once to
  * avoid repeated detoastings and resultant excess memory usage
- * during the comparisons.	Also, check to see if the value is
+ * during the comparisons. Also, check to see if the value is
  * excessively wide, and if so don't detoast at all --- just
  * ignore the value.
  */
@@ -2371,7 +2371,7 @@ compute_scalar_stats(VacAttrStatsP stats,
  * accumulate ordering-correlation statistics.
  *
  * To determine which are most common, we first have to count the
- * number of duplicates of each value.	The duplicates are adjacent in
+ * number of duplicates of each value. The duplicates are adjacent in
  * the sorted list, so a brute-force approach is to compare successive
  * datum values until we find two that are not equal. However, that
  * requires N-1 invocations of the datum comparison routine, which are
@@ -2380,7 +2380,7 @@ compute_scalar_stats(VacAttrStatsP stats,
  * that are adjacent in the sorted order; otherwise it could not know
  * that it's ordered the pair correctly.) We exploit this by having
  * compare_scalars remember the highest tupno index that each
- * ScalarItem has been found equal to.	At the end of the sort, a
+ * ScalarItem has been found equal to. At the end of the sort, a
  * ScalarItem's tupnoLink will still point to itself if and only if it
  * is the last item of its group of duplicates (since the group will
  * be ordered by tupno).
@@ -2500,7 +2500,7 @@ compute_scalar_stats(VacAttrStatsP stats,
  * Decide how many values are worth storing as most-common values. If
  * we are able to generate a complete MCV list (all the values in the
  * sample will fit, and we think these are all the ones in the table),
- * then do so.	Otherwise, store only those values that are
+ * then do so. Otherwise, store only those values that are
  * significantly more common than the (estimated) average. We set the
  * threshold rather arbitrarily at 25% more than average, with at
  * least 2 instances in the sample. Also, we won't suppress values
@@ -2655,7 +2655,7 @@ compute_scalar_stats(VacAttrStatsP stats,

 /*
  * The object of this loop is to copy the first and last values[]
- * entries along with evenly-spaced values in between.	So the
+ * entries along with evenly-spaced values in between. So the
  * i'th value is values[(i * (nvals - 1)) / (num_hist - 1)]. But
  * computing that subscript directly risks integer overflow when
  * the stats target is more than a couple thousand. Instead we
@@ -2766,7 +2766,7 @@ compute_scalar_stats(VacAttrStatsP stats,
  * qsort_arg comparator for sorting ScalarItems
  *
  * Aside from sorting the items, we update the tupnoLink[] array
- * whenever two ScalarItems are found to contain equal datums.	The array
+ * whenever two ScalarItems are found to contain equal datums. The array
  * is indexed by tupno; for each ScalarItem, it contains the highest
  * tupno that that item's datum has been found to be equal to. This allows
  * us to avoid additional comparisons in compute_scalar_stats().
@@ -151,7 +151,7 @@
  *
  * This struct declaration has the maximal length, but in a real queue entry
  * the data area is only big enough for the actual channel and payload strings
- * (each null-terminated).	AsyncQueueEntryEmptySize is the minimum possible
+ * (each null-terminated). AsyncQueueEntryEmptySize is the minimum possible
  * entry size, if both channel and payload strings are empty (but note it
  * doesn't include alignment padding).
  *
@@ -265,7 +265,7 @@ static SlruCtlData AsyncCtlData;
  *
  * The most data we can have in the queue at a time is QUEUE_MAX_PAGE/2
  * pages, because more than that would confuse slru.c into thinking there
- * was a wraparound condition.	With the default BLCKSZ this means there
+ * was a wraparound condition. With the default BLCKSZ this means there
  * can be up to 8GB of queued-and-not-read data.
  *
  * Note: it's possible to redefine QUEUE_MAX_PAGE with a smaller multiple of
@@ -395,7 +395,7 @@ asyncQueuePagePrecedes(int p, int q)
 int diff;

 /*
- * We have to compare modulo (QUEUE_MAX_PAGE+1)/2.	Both inputs should be
+ * We have to compare modulo (QUEUE_MAX_PAGE+1)/2. Both inputs should be
  * in the range 0..QUEUE_MAX_PAGE.
  */
 Assert(p >= 0 && p <= QUEUE_MAX_PAGE);
@@ -826,7 +826,7 @@ PreCommit_Notify(void)
 while (nextNotify != NULL)
 {
 /*
- * Add the pending notifications to the queue.	We acquire and
+ * Add the pending notifications to the queue. We acquire and
  * release AsyncQueueLock once per page, which might be overkill
  * but it does allow readers to get in while we're doing this.
  *
@@ -1042,12 +1042,12 @@ Exec_UnlistenAllCommit(void)
  * The reason that this is not done in AtCommit_Notify is that there is
  * a nonzero chance of errors here (for example, encoding conversion errors
  * while trying to format messages to our frontend). An error during
- * AtCommit_Notify would be a PANIC condition.	The timing is also arranged
+ * AtCommit_Notify would be a PANIC condition. The timing is also arranged
  * to ensure that a transaction's self-notifies are delivered to the frontend
  * before it gets the terminating ReadyForQuery message.
  *
  * Note that we send signals and process the queue even if the transaction
- * eventually aborted.	This is because we need to clean out whatever got
+ * eventually aborted. This is because we need to clean out whatever got
  * added to the queue.
  *
  * NOTE: we are outside of any transaction here.
@@ -1137,7 +1137,7 @@ IsListeningOn(const char *channel)

 /*
  * Remove our entry from the listeners array when we are no longer listening
- * on any channel.	NB: must not fail if we're already not listening.
+ * on any channel. NB: must not fail if we're already not listening.
  */
 static void
 asyncQueueUnregister(void)
@@ -1179,7 +1179,7 @@ asyncQueueIsFull(void)
 /*
  * The queue is full if creating a new head page would create a page that
  * logically precedes the current global tail pointer, ie, the head
- * pointer would wrap around compared to the tail.	We cannot create such
+ * pointer would wrap around compared to the tail. We cannot create such
  * a head page for fear of confusing slru.c. For safety we round the tail
  * pointer back to a segment boundary (compare the truncation logic in
  * asyncQueueAdvanceTail).
@@ -1198,7 +1198,7 @@ asyncQueueIsFull(void)

 /*
  * Advance the QueuePosition to the next entry, assuming that the current
- * entry is of length entryLength.	If we jump to a new page the function
+ * entry is of length entryLength. If we jump to a new page the function
  * returns true, else false.
  */
 static bool
@@ -1267,7 +1267,7 @@ asyncQueueNotificationToEntry(Notification *n, AsyncQueueEntry *qe)
  * the last byte which simplifies reading the page later.
  *
  * We are passed the list cell containing the next notification to write
- * and return the first still-unwritten cell back.	Eventually we will return
+ * and return the first still-unwritten cell back. Eventually we will return
  * NULL indicating all is done.
  *
  * We are holding AsyncQueueLock already from the caller and grab AsyncCtlLock
@@ -1344,7 +1344,7 @@ asyncQueueAddEntries(ListCell *nextNotify)
  * Page is full, so we're done here, but first fill the next page
  * with zeroes. The reason to do this is to ensure that slru.c's
  * idea of the head page is always the same as ours, which avoids
- * boundary problems in SimpleLruTruncate.	The test in
+ * boundary problems in SimpleLruTruncate. The test in
  * asyncQueueIsFull() ensured that there is room to create this
  * page without overrunning the queue.
  */
@@ -1518,7 +1518,7 @@ AtAbort_Notify(void)
 /*
  * If we LISTEN but then roll back the transaction after PreCommit_Notify,
  * we have registered as a listener but have not made any entry in
- * listenChannels.	In that case, deregister again.
+ * listenChannels. In that case, deregister again.
  */
 if (amRegisteredListener && listenChannels == NIL)
 asyncQueueUnregister();
@@ -1771,7 +1771,7 @@ EnableNotifyInterrupt(void)
  * is disabled until the next EnableNotifyInterrupt call.
  *
  * The PROCSIG_CATCHUP_INTERRUPT signal handler also needs to call this,
- * so as to prevent conflicts if one signal interrupts the other.	So we
+ * so as to prevent conflicts if one signal interrupts the other. So we
  * must return the previous state of the flag.
  */
 bool
@@ -1866,7 +1866,7 @@ asyncQueueReadAllNotifications(void)
 /*
  * We copy the data from SLRU into a local buffer, so as to avoid
  * holding the AsyncCtlLock while we are examining the entries and
- * possibly transmitting them to our frontend.	Copy only the part
+ * possibly transmitting them to our frontend. Copy only the part
  * of the page we will actually inspect.
  */
 slotno = SimpleLruReadPage_ReadOnly(AsyncCtl, curpage,
@@ -1940,7 +1940,7 @@ asyncQueueReadAllNotifications(void)
  * and deliver relevant ones to my frontend.
  *
  * The current page must have been fetched into page_buffer from shared
- * memory.	(We could access the page right in shared memory, but that
+ * memory. (We could access the page right in shared memory, but that
  * would imply holding the AsyncCtlLock throughout this routine.)
  *
  * We stop if we reach the "stop" position, or reach a notification from an
@@ -2146,7 +2146,7 @@ NotifyMyFrontEnd(const char *channel, const char *payload, int32 srcPid)
 pq_endmessage(&buf);

 /*
- * NOTE: we do not do pq_flush() here.	For a self-notify, it will
+ * NOTE: we do not do pq_flush() here. For a self-notify, it will
  * happen at the end of the transaction, and for incoming notifies
  * ProcessIncomingNotify will do it after finding all the notifies.
  */
@@ -1,7 +1,7 @@
 /*-------------------------------------------------------------------------
  *
  * cluster.c
- * CLUSTER a table on an index.	This is now also used for VACUUM FULL.
+ * CLUSTER a table on an index. This is now also used for VACUUM FULL.
  *
  * There is hardly anything left of Paul Brown's original implementation...
  *
@@ -94,7 +94,7 @@ static void reform_and_rewrite_tuple(HeapTuple tuple,
  *
  * The single-relation case does not have any such overhead.
  *
- * We also allow a relation to be specified without index.	In that case,
+ * We also allow a relation to be specified without index. In that case,
  * the indisclustered bit will be looked up, and an ERROR will be thrown
  * if there is no index with the bit set.
  *---------------------------------------------------------------------------
@@ -206,7 +206,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel)
 ALLOCSET_DEFAULT_MAXSIZE);

 /*
- * Build the list of relations to cluster.	Note that this lives in
+ * Build the list of relations to cluster. Note that this lives in
  * cluster_context.
  */
 rvs = get_tables_to_cluster(cluster_context);
@@ -243,7 +243,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel)
  *
  * This clusters the table by creating a new, clustered table and
  * swapping the relfilenodes of the new table and the old table, so
- * the OID of the original table is preserved.	Thus we do not lose
+ * the OID of the original table is preserved. Thus we do not lose
  * GRANT, inheritance nor references to this table (this was a bug
  * in releases thru 7.3).
  *
@@ -252,7 +252,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel)
  * them incrementally while we load the table.
  *
  * If indexOid is InvalidOid, the table will be rewritten in physical order
- * instead of index order.	This is the new implementation of VACUUM FULL,
+ * instead of index order. This is the new implementation of VACUUM FULL,
  * and error messages should refer to the operation as VACUUM not CLUSTER.
  */
 void
@@ -265,7 +265,7 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose)

 /*
  * We grab exclusive access to the target rel and index for the duration
- * of the transaction.	(This is redundant for the single-transaction
+ * of the transaction. (This is redundant for the single-transaction
  * case, since cluster() already did it.) The index lock is taken inside
  * check_index_is_clusterable.
  */
@@ -300,7 +300,7 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose)
  * check in the "recheck" case is appropriate (which currently means
  * somebody is executing a database-wide CLUSTER), because there is
  * another check in cluster() which will stop any attempt to cluster
- * remote temp tables by name.	There is another check in cluster_rel
+ * remote temp tables by name. There is another check in cluster_rel
  * which is redundant, but we leave it for extra safety.
  */
 if (RELATION_IS_OTHER_TEMP(OldHeap))
@@ -393,7 +393,7 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose)

 /*
  * All predicate locks on the tuples or pages are about to be made
- * invalid, because we move tuples around.	Promote them to relation
+ * invalid, because we move tuples around. Promote them to relation
  * locks. Predicate locks on indexes will be promoted when they are
  * reindexed.
  */
@@ -440,7 +440,7 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck, LOCKMOD

 /*
  * Disallow clustering on incomplete indexes (those that might not index
- * every row of the relation).	We could relax this by making a separate
+ * every row of the relation). We could relax this by making a separate
  * seqscan pass over the table to copy the missing rows, but that seems
  * expensive and tedious.
  */
@@ -649,14 +649,14 @@ make_new_heap(Oid OIDOldHeap, Oid NewTableSpace, bool forcetemp,

 /*
  * Create the new heap, using a temporary name in the same namespace as
- * the existing table.	NOTE: there is some risk of collision with user
+ * the existing table. NOTE: there is some risk of collision with user
  * relnames. Working around this seems more trouble than it's worth; in
  * particular, we can't create the new heap in a different namespace from
  * the old, or we will have problems with the TEMP status of temp tables.
  *
  * Note: the new heap is not a shared relation, even if we are rebuilding
  * a shared rel. However, we do make the new heap mapped if the source is
- * mapped.	This simplifies swap_relation_files, and is absolutely
+ * mapped. This simplifies swap_relation_files, and is absolutely
  * necessary for rebuilding pg_class, for reasons explained there.
  */
 snprintf(NewHeapName, sizeof(NewHeapName), "pg_temp_%u", OIDOldHeap);
@@ -696,11 +696,11 @@ make_new_heap(Oid OIDOldHeap, Oid NewTableSpace, bool forcetemp,
  *
  * If the relation doesn't have a TOAST table already, we can't need one
  * for the new relation. The other way around is possible though: if some
- * wide columns have been dropped, NewHeapCreateToastTable can decide
- * that no TOAST table is needed for the new table.
+ * wide columns have been dropped, NewHeapCreateToastTable can decide that
+ * no TOAST table is needed for the new table.
  *
- * Note that NewHeapCreateToastTable ends with CommandCounterIncrement,
- * so that the TOAST table will be visible for insertion.
+ * Note that NewHeapCreateToastTable ends with CommandCounterIncrement, so
+ * that the TOAST table will be visible for insertion.
  */
 toastid = OldHeap->rd_rel->reltoastrelid;
 if (OidIsValid(toastid))
@@ -788,12 +788,12 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose,

 /*
  * If the OldHeap has a toast table, get lock on the toast table to keep
- * it from being vacuumed.	This is needed because autovacuum processes
+ * it from being vacuumed. This is needed because autovacuum processes
  * toast tables independently of their main tables, with no lock on the
- * latter.	If an autovacuum were to start on the toast table after we
+ * latter. If an autovacuum were to start on the toast table after we
  * compute our OldestXmin below, it would use a later OldestXmin, and then
  * possibly remove as DEAD toast tuples belonging to main tuples we think
- * are only RECENTLY_DEAD.	Then we'd fail while trying to copy those
+ * are only RECENTLY_DEAD. Then we'd fail while trying to copy those
  * tuples.
  *
  * We don't need to open the toast relation here, just lock it. The lock
@@ -814,7 +814,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose,
 /*
  * If both tables have TOAST tables, perform toast swap by content. It is
  * possible that the old table has a toast table but the new one doesn't,
- * if toastable columns have been dropped.	In that case we have to do
+ * if toastable columns have been dropped. In that case we have to do
  * swap by links. This is okay because swap by content is only essential
  * for system catalogs, and we don't support schema changes for them.
  */
@@ -833,7 +833,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose,
  *
  * Note that we must hold NewHeap open until we are done writing data,
  * since the relcache will not guarantee to remember this setting once
- * the relation is closed.	Also, this technique depends on the fact
+ * the relation is closed. Also, this technique depends on the fact
  * that no one will try to read from the NewHeap until after we've
  * finished writing it and swapping the rels --- otherwise they could
  * follow the toast pointers to the wrong place. (It would actually
@@ -929,7 +929,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose,
 /*
  * Scan through the OldHeap, either in OldIndex order or sequentially;
  * copy each tuple into the NewHeap, or transiently to the tuplesort
- * module.	Note that we don't bother sorting dead tuples (they won't get
+ * module. Note that we don't bother sorting dead tuples (they won't get
  * to the new table anyway).
  */
 for (;;)
@@ -1217,7 +1217,7 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
 NameStr(relform2->relname), r2);

 /*
- * Send replacement mappings to relmapper.	Note these won't actually
+ * Send replacement mappings to relmapper. Note these won't actually
  * take effect until CommandCounterIncrement.
  */
 RelationMapUpdateMap(r1, relfilenode2, relform1->relisshared, false);
@@ -1404,7 +1404,8 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
 relform1->relkind == RELKIND_TOASTVALUE &&
 relform2->relkind == RELKIND_TOASTVALUE)
 {
- Oid toastIndex1, toastIndex2;
+ Oid toastIndex1,
+ toastIndex2;

 /* Get valid index for each relation */
 toastIndex1 = toast_get_valid_index(r1,
@@ -1440,7 +1441,7 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
  * non-transient relation.)
  *
  * Caution: the placement of this step interacts with the decision to
- * handle toast rels by recursion.	When we are trying to rebuild pg_class
+ * handle toast rels by recursion. When we are trying to rebuild pg_class
  * itself, the smgr close on pg_class must happen after all accesses in
  * this function.
  */
@@ -1487,9 +1488,9 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap,

 /*
  * Rebuild each index on the relation (but not the toast table, which is
- * all-new at this point).	It is important to do this before the DROP
+ * all-new at this point). It is important to do this before the DROP
  * step because if we are processing a system catalog that will be used
- * during DROP, we want to have its indexes available.	There is no
+ * during DROP, we want to have its indexes available. There is no
  * advantage to the other order anyway because this is all transactional,
  * so no chance to reclaim disk space before commit. We do not need a
  * final CommandCounterIncrement() because reindex_relation does it.
@@ -1511,11 +1512,11 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap,
  * swap_relation_files()), thus relfrozenxid was not updated. That's
  * annoying because a potential reason for doing a VACUUM FULL is a
  * imminent or actual anti-wraparound shutdown. So, now that we can
- * access the new relation using it's indices, update
- * relfrozenxid. pg_class doesn't have a toast relation, so we don't need
- * to update the corresponding toast relation. Not that there's little
- * point moving all relfrozenxid updates here since swap_relation_files()
- * needs to write to pg_class for non-mapped relations anyway.
+ * access the new relation using it's indices, update relfrozenxid.
+ * pg_class doesn't have a toast relation, so we don't need to update the
+ * corresponding toast relation. Not that there's little point moving all
+ * relfrozenxid updates here since swap_relation_files() needs to write to
+ * pg_class for non-mapped relations anyway.
  */
 if (OIDOldHeap == RelationRelationId)
 {
@@ -50,7 +50,7 @@ unique_key_recheck(PG_FUNCTION_ARGS)
 bool isnull[INDEX_MAX_KEYS];

 /*
- * Make sure this is being called as an AFTER ROW trigger.	Note:
+ * Make sure this is being called as an AFTER ROW trigger. Note:
  * translatable error strings are shared with ri_triggers.c, so resist the
  * temptation to fold the function name into them.
  */
@@ -87,7 +87,7 @@ unique_key_recheck(PG_FUNCTION_ARGS)
  * If the new_row is now dead (ie, inserted and then deleted within our
  * transaction), we can skip the check. However, we have to be careful,
  * because this trigger gets queued only in response to index insertions;
- * which means it does not get queued for HOT updates.	The row we are
+ * which means it does not get queued for HOT updates. The row we are
  * called for might now be dead, but have a live HOT child, in which case
  * we still need to make the check. Therefore we have to use
  * heap_hot_search, not just HeapTupleSatisfiesVisibility as is done in
@@ -125,8 +125,8 @@ typedef struct CopyStateData
 bool *force_quote_flags; /* per-column CSV FQ flags */
 List *force_notnull; /* list of column names */
 bool *force_notnull_flags; /* per-column CSV FNN flags */
- List *force_null; /* list of column names */
- bool *force_null_flags; /* per-column CSV FN flags */
+ List *force_null; /* list of column names */
+ bool *force_null_flags; /* per-column CSV FN flags */
 bool convert_selectively; /* do selective binary conversion? */
 List *convert_select; /* list of column names (can be NIL) */
 bool *convert_select_flags; /* per-column CSV/TEXT CS flags */
@@ -189,7 +189,7 @@ typedef struct CopyStateData

 /*
  * Finally, raw_buf holds raw data read from the data source (file or
- * client connection).	CopyReadLine parses this data sufficiently to
+ * client connection). CopyReadLine parses this data sufficiently to
  * locate line boundaries, then transfers the data to line_buf and
  * converts it. Note: we guarantee that there is a \0 at
  * raw_buf[raw_buf_len].
@@ -215,7 +215,7 @@ typedef struct
  * function call overhead in tight COPY loops.
  *
  * We must use "if (1)" because the usual "do {...} while(0)" wrapper would
- * prevent the continue/break processing from working.	We end the "if (1)"
+ * prevent the continue/break processing from working. We end the "if (1)"
  * with "else ((void) 0)" to ensure the "if" does not unintentionally match
  * any "else" in the calling code, and to avoid any compiler warnings about
  * empty statements. See http://www.cit.gu.edu.au/~anthony/info/C/C.macros.
@@ -549,7 +549,7 @@ CopySendEndOfRow(CopyState cstate)
  * CopyGetData reads data from the source (file or frontend)
  *
  * We attempt to read at least minread, and at most maxread, bytes from
- * the source.	The actual number of bytes read is returned; if this is
+ * the source. The actual number of bytes read is returned; if this is
  * less than minread, EOF was detected.
  *
  * Note: when copying from the frontend, we expect a proper EOF mark per
@@ -766,7 +766,7 @@ CopyLoadRawBuf(CopyState cstate)
  * we also support copying the output of an arbitrary SELECT query.
  *
  * If <pipe> is false, transfer is between the table and the file named
- * <filename>.	Otherwise, transfer is between the table and our regular
+ * <filename>. Otherwise, transfer is between the table and our regular
  * input/output stream. The latter could be either stdin/stdout or a
  * socket, depending on whether we're running under Postmaster control.
  *
@@ -1203,7 +1203,7 @@ ProcessCopyOptions(CopyState cstate,
 if (cstate->force_null != NIL && !is_from)
 ereport(ERROR,
 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY force null only available using COPY FROM")));
+ errmsg("COPY force null only available using COPY FROM")));

 /* Don't allow the delimiter to appear in the null string. */
 if (strchr(cstate->null_print, cstate->delim[0]) != NULL)
@@ -1298,7 +1298,7 @@ BeginCopy(bool is_from,
 errmsg("COPY (SELECT) WITH OIDS is not supported")));

 /*
- * Run parse analysis and rewrite.	Note this also acquires sufficient
+ * Run parse analysis and rewrite. Note this also acquires sufficient
  * locks on the source table(s).
  *
  * Because the parser and planner tend to scribble on their input, we
@@ -1428,8 +1428,8 @@ BeginCopy(bool is_from,
 if (!list_member_int(cstate->attnumlist, attnum))
 ereport(ERROR,
 (errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("FORCE NULL column \"%s\" not referenced by COPY",
- NameStr(tupDesc->attrs[attnum - 1]->attname))));
+ errmsg("FORCE NULL column \"%s\" not referenced by COPY",
+ NameStr(tupDesc->attrs[attnum - 1]->attname))));
 cstate->force_null_flags[attnum - 1] = true;
 }
 }
@@ -1730,7 +1730,7 @@ CopyTo(CopyState cstate)
  * Create a temporary memory context that we can reset once per row to
  * recover palloc'd memory. This avoids any problems with leaks inside
  * datatype output routines, and should be faster than retail pfree's
- * anyway.	(We don't need a whole econtext as CopyFrom does.)
+ * anyway. (We don't need a whole econtext as CopyFrom does.)
  */
 cstate->rowcontext = AllocSetContextCreate(CurrentMemoryContext,
 "COPY TO",
@@ -2248,8 +2248,8 @@ CopyFrom(CopyState cstate)
 {
 /*
  * Reset the per-tuple exprcontext. We can only do this if the
- * tuple buffer is empty. (Calling the context the per-tuple memory
- * context is a bit of a misnomer now.)
+ * tuple buffer is empty. (Calling the context the per-tuple
+ * memory context is a bit of a misnomer now.)
  */
 ResetPerTupleExprContext(estate);
 }
@@ -2569,19 +2569,20 @@ BeginCopyFrom(Relation rel,
 num_defaults++;

 /*
- * If a default expression looks at the table being loaded, then
- * it could give the wrong answer when using multi-insert. Since
- * database access can be dynamic this is hard to test for
- * exactly, so we use the much wider test of whether the
- * default expression is volatile. We allow for the special case
- * of when the default expression is the nextval() of a sequence
- * which in this specific case is known to be safe for use with
- * the multi-insert optimisation. Hence we use this special case
- * function checker rather than the standard check for
+ * If a default expression looks at the table being loaded,
+ * then it could give the wrong answer when using
+ * multi-insert. Since database access can be dynamic this is
+ * hard to test for exactly, so we use the much wider test of
+ * whether the default expression is volatile. We allow for
+ * the special case of when the default expression is the
+ * nextval() of a sequence which in this specific case is
+ * known to be safe for use with the multi-insert
+ * optimisation. Hence we use this special case function
+ * checker rather than the standard check for
  * contain_volatile_functions().
  */
 if (!volatile_defexprs)
- volatile_defexprs = contain_volatile_functions_not_nextval((Node *)defexpr);
+ volatile_defexprs = contain_volatile_functions_not_nextval((Node *) defexpr);
 }
 }
 }
@@ -2861,8 +2862,8 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,

 if (cstate->csv_mode)
 {
- if(string == NULL &&
- cstate->force_notnull_flags[m])
+ if (string == NULL &&
+ cstate->force_notnull_flags[m])
 {
 /*
  * FORCE_NOT_NULL option is set and column is NULL -
@@ -2870,14 +2871,14 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,
  */
 string = cstate->null_print;
 }
- else if(string != NULL && cstate->force_null_flags[m]
- && strcmp(string,cstate->null_print) == 0 )
+ else if (string != NULL && cstate->force_null_flags[m]
+ && strcmp(string, cstate->null_print) == 0)
 {
 /*
- * FORCE_NULL option is set and column matches the NULL string.
- * It must have been quoted, or otherwise the string would already
- * have been set to NULL.
- * Convert it to NULL as specified.
+ * FORCE_NULL option is set and column matches the NULL
+ * string. It must have been quoted, or otherwise the
+ * string would already have been set to NULL. Convert it
+ * to NULL as specified.
  */
 string = NULL;
 }
@@ -2920,7 +2921,7 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,
  * if client chooses to send that now.
  *
  * Note that we MUST NOT try to read more data in an old-protocol
- * copy, since there is no protocol-level EOF marker then.	We
+ * copy, since there is no protocol-level EOF marker then. We
  * could go either way for copy from file, but choose to throw
  * error if there's data after the EOF marker, for consistency
  * with the new-protocol case.
@@ -2982,7 +2983,7 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,

 /*
  * Now compute and insert any defaults available for the columns not
- * provided by the input data.	Anything not processed here or above will
+ * provided by the input data. Anything not processed here or above will
  * remain NULL.
  */
 for (i = 0; i < num_defaults; i++)
@@ -3017,7 +3018,7 @@ EndCopyFrom(CopyState cstate)
  * server encoding.
  *
  * Result is true if read was terminated by EOF, false if terminated
- * by newline.	The terminating newline or EOF marker is not included
+ * by newline. The terminating newline or EOF marker is not included
  * in the final value of line_buf.
  */
 static bool
@@ -3173,7 +3174,7 @@ CopyReadLineText(CopyState cstate)
  * of read-ahead and avoid the many calls to
  * IF_NEED_REFILL_AND_NOT_EOF_CONTINUE(), but the COPY_OLD_FE protocol
  * does not allow us to read too far ahead or we might read into the
- * next data, so we read-ahead only as far we know we can.	One
+ * next data, so we read-ahead only as far we know we can. One
  * optimization would be to read-ahead four byte here if
  * cstate->copy_dest != COPY_OLD_FE, but it hardly seems worth it,
  * considering the size of the buffer.
@@ -3183,7 +3184,7 @@ CopyReadLineText(CopyState cstate)
 REFILL_LINEBUF;

 /*
- * Try to read some more data.	This will certainly reset
+ * Try to read some more data. This will certainly reset
  * raw_buf_index to zero, and raw_buf_ptr must go with it.
  */
 if (!CopyLoadRawBuf(cstate))
@@ -3241,7 +3242,7 @@ CopyReadLineText(CopyState cstate)
 /*
  * Updating the line count for embedded CR and/or LF chars is
  * necessarily a little fragile - this test is probably about the
- * best we can do.	(XXX it's arguable whether we should do this
+ * best we can do. (XXX it's arguable whether we should do this
  * at all --- is cur_lineno a physical or logical count?)
  */
 if (in_quote && c == (cstate->eol_type == EOL_NL ? '\n' : '\r'))
@@ -3420,7 +3421,7 @@ CopyReadLineText(CopyState cstate)
  * after a backslash is special, so we skip over that second
  * character too. If we didn't do that \\. would be
  * considered an eof-of copy, while in non-CSV mode it is a
- * literal backslash followed by a period.	In CSV mode,
+ * literal backslash followed by a period. In CSV mode,
  * backslashes are not special, so we want to process the
  * character after the backslash just like a normal character,
  * so we don't increment in those cases.
@@ -3523,7 +3524,7 @@ CopyReadAttributesText(CopyState cstate)
 /*
  * The de-escaped attributes will certainly not be longer than the input
  * data line, so we can just force attribute_buf to be large enough and
- * then transfer data without any checks for enough space.	We need to do
+ * then transfer data without any checks for enough space. We need to do
  * it this way because enlarging attribute_buf mid-stream would invalidate
  * pointers already stored into cstate->raw_fields[].
  */
@@ -3753,7 +3754,7 @@ CopyReadAttributesCSV(CopyState cstate)
 /*
  * The de-escaped attributes will certainly not be longer than the input
  * data line, so we can just force attribute_buf to be large enough and
- * then transfer data without any checks for enough space.	We need to do
+ * then transfer data without any checks for enough space. We need to do
  * it this way because enlarging attribute_buf mid-stream would invalidate
  * pointers already stored into cstate->raw_fields[].
  */
@@ -3968,7 +3969,7 @@ CopyAttributeOutText(CopyState cstate, char *string)
 /*
  * We have to grovel through the string searching for control characters
  * and instances of the delimiter character. In most cases, though, these
- * are infrequent.	To avoid overhead from calling CopySendData once per
+ * are infrequent. To avoid overhead from calling CopySendData once per
  * character, we dump out all characters between escaped characters in a
  * single call. The loop invariant is that the data from "start" to "ptr"
  * can be sent literally, but hasn't yet been.
@@ -104,7 +104,7 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,

 /*
  * For materialized views, lock down security-restricted operations and
- * arrange to make GUC variable changes local to this command.	This is
+ * arrange to make GUC variable changes local to this command. This is
  * not necessary for security, but this keeps the behavior similar to
  * REFRESH MATERIALIZED VIEW. Otherwise, one could create a materialized
  * view not possible to refresh.
@@ -124,9 +124,9 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
  * plancache.c.
  *
  * Because the rewriter and planner tend to scribble on the input, we make
- * a preliminary copy of the source querytree.	This prevents problems in
+ * a preliminary copy of the source querytree. This prevents problems in
  * the case that CTAS is in a portal or plpgsql function and is executed
- * repeatedly.	(See also the same hack in EXPLAIN and PREPARE.)
+ * repeatedly. (See also the same hack in EXPLAIN and PREPARE.)
  */
 rewritten = QueryRewrite((Query *) copyObject(query));

@@ -141,7 +141,7 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,

 /*
  * Use a snapshot with an updated command ID to ensure this query sees
- * results of any previously executed queries.	(This could only matter if
+ * results of any previously executed queries. (This could only matter if
  * the planner executed an allegedly-stable function that changed the
  * database contents, but let's do it anyway to be parallel to the EXPLAIN
  * code path.)
@@ -359,8 +359,8 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)

 /*
  * If necessary, create a TOAST table for the target table. Note that
- * NewRelationCreateToastTable ends with CommandCounterIncrement(), so that
- * the TOAST table will be visible for insertion.
+ * NewRelationCreateToastTable ends with CommandCounterIncrement(), so
+ * that the TOAST table will be visible for insertion.
  */
 CommandCounterIncrement();

@@ -265,7 +265,7 @@ createdb(const CreatedbStmt *stmt)
  * To create a database, must have createdb privilege and must be able to
  * become the target role (this does not imply that the target role itself
  * must have createdb privilege). The latter provision guards against
- * "giveaway" attacks.	Note that a superuser will always have both of
+ * "giveaway" attacks. Note that a superuser will always have both of
  * these privileges a fortiori.
  */
 if (!have_createdb_privilege())
@@ -397,7 +397,7 @@ createdb(const CreatedbStmt *stmt)
 /*
  * If we are trying to change the default tablespace of the template,
  * we require that the template not have any files in the new default
- * tablespace.	This is necessary because otherwise the copied
+ * tablespace. This is necessary because otherwise the copied
  * database would contain pg_class rows that refer to its default
  * tablespace both explicitly (by OID) and implicitly (as zero), which
  * would cause problems. For example another CREATE DATABASE using
@@ -433,7 +433,7 @@ createdb(const CreatedbStmt *stmt)
 }

 /*
- * Check for db name conflict.	This is just to give a more friendly error
+ * Check for db name conflict. This is just to give a more friendly error
  * message than "unique index violation". There's a race condition but
  * we're willing to accept the less friendly message in that case.
  */
@@ -498,7 +498,7 @@ createdb(const CreatedbStmt *stmt)

 /*
  * We deliberately set datacl to default (NULL), rather than copying it
- * from the template database.	Copying it would be a bad idea when the
+ * from the template database. Copying it would be a bad idea when the
  * owner is not the same as the template's owner.
  */
 new_record_nulls[Anum_pg_database_datacl - 1] = true;
@@ -751,7 +751,8 @@ dropdb(const char *dbname, bool missing_ok)
 HeapTuple tup;
 int notherbackends;
 int npreparedxacts;
- int nslots, nslots_active;
+ int nslots,
+ nslots_active;

 /*
  * Look up the target database's OID, and get exclusive lock on it. We
@@ -1160,7 +1161,7 @@ movedb(const char *dbname, const char *tblspcname)

 /*
  * Use an ENSURE block to make sure we remove the debris if the copy fails
- * (eg, due to out-of-disk-space).	This is not a 100% solution, because
+ * (eg, due to out-of-disk-space). This is not a 100% solution, because
  * of the possibility of failure during transaction commit, but it should
  * handle most scenarios.
  */
@@ -1647,7 +1648,7 @@ get_db_info(const char *name, LOCKMODE lockmode,
 LockSharedObject(DatabaseRelationId, dbOid, 0, lockmode);

 /*
- * And now, re-fetch the tuple by OID.	If it's still there and still
+ * And now, re-fetch the tuple by OID. If it's still there and still
  * the same name, we win; else, drop the lock and loop back to try
  * again.
  */
@@ -202,7 +202,7 @@ defGetInt64(DefElem *def)

 /*
  * Values too large for int4 will be represented as Float
- * constants by the lexer.	Accept these if they are valid int8
+ * constants by the lexer. Accept these if they are valid int8
  * strings.
  */
 return DatumGetInt64(DirectFunctionCall1(int8in,
@@ -606,7 +606,7 @@ filter_event_trigger(const char **tag, EventTriggerCacheItem *item)
 }

 /*
- * Setup for running triggers for the given event.	Return value is an OID list
+ * Setup for running triggers for the given event. Return value is an OID list
  * of functions to run; if there are any, trigdata is filled with an
  * appropriate EventTriggerData for them to receive.
  */
@@ -625,7 +625,7 @@ EventTriggerCommonSetup(Node *parsetree,
  * invoked to match up exactly with the list that CREATE EVENT TRIGGER
  * accepts. This debugging cross-check will throw an error if this
  * function is invoked for a command tag that CREATE EVENT TRIGGER won't
- * accept.	(Unfortunately, there doesn't seem to be any simple, automated
+ * accept. (Unfortunately, there doesn't seem to be any simple, automated
  * way to verify that CREATE EVENT TRIGGER doesn't accept extra stuff that
  * never reaches this control point.)
  *
@@ -655,7 +655,7 @@ EventTriggerCommonSetup(Node *parsetree,

 /*
  * Filter list of event triggers by command tag, and copy them into our
- * memory context.	Once we start running the command trigers, or indeed
+ * memory context. Once we start running the command trigers, or indeed
  * once we do anything at all that touches the catalogs, an invalidation
  * might leave cachelist pointing at garbage, so we must do this before we
  * can do much else.
@@ -783,7 +783,7 @@ EventTriggerSQLDrop(Node *parsetree)
 return;

 /*
- * Use current state to determine whether this event fires at all.	If
+ * Use current state to determine whether this event fires at all. If
  * there are no triggers for the sql_drop event, then we don't have
  * anything to do here. Note that dropped object collection is disabled
  * if this is the case, so even if we were to try to run, the list would
@@ -798,7 +798,7 @@ EventTriggerSQLDrop(Node *parsetree)
 &trigdata);

 /*
- * Nothing to do if run list is empty.	Note this shouldn't happen,
+ * Nothing to do if run list is empty. Note this shouldn't happen,
  * because if there are no sql_drop events, then objects-to-drop wouldn't
  * have been collected in the first place and we would have quitted above.
  */
@@ -813,7 +813,7 @@ EventTriggerSQLDrop(Node *parsetree)

 /*
  * Make sure pg_event_trigger_dropped_objects only works when running
- * these triggers.	Use PG_TRY to ensure in_sql_drop is reset even when
+ * these triggers. Use PG_TRY to ensure in_sql_drop is reset even when
  * one trigger fails. (This is perhaps not necessary, as the currentState
  * variable will be removed shortly by our caller, but it seems better to
  * play safe.)
@@ -1053,7 +1053,7 @@ EventTriggerBeginCompleteQuery(void)
  * returned false previously.
  *
  * Note: this might be called in the PG_CATCH block of a failing transaction,
- * so be wary of running anything unnecessary.	(In particular, it's probably
+ * so be wary of running anything unnecessary. (In particular, it's probably
  * unwise to try to allocate memory.)
  */
 void
@ -86,7 +86,7 @@ static void show_sort_group_keys(PlanState *planstate, const char *qlabel,
static void show_sort_info(SortState *sortstate, ExplainState *es);
static void show_hash_info(HashState *hashstate, ExplainState *es);
static void show_tidbitmap_info(BitmapHeapScanState *planstate,
ExplainState *es);
static void show_instrumentation_count(const char *qlabel, int which,
PlanState *planstate, ExplainState *es);
static void show_foreignscan_info(ForeignScanState *fsstate, ExplainState *es);
@ -197,7 +197,7 @@ ExplainQuery(ExplainStmt *stmt, const char *queryString,
* plancache.c.
*
* Because the rewriter and planner tend to scribble on the input, we make
* a preliminary copy of the source querytree. This prevents problems in
* the case that the EXPLAIN is in a portal or plpgsql function and is
* executed repeatedly. (See also the same hack in DECLARE CURSOR and
* PREPARE.) XXX FIXME someday.
@ -320,8 +320,9 @@ ExplainOneQuery(Query *query, IntoClause *into, ExplainState *es,
(*ExplainOneQuery_hook) (query, into, es, queryString, params);
else
{
PlannedStmt *plan;
instr_time planstart, planduration;
PlannedStmt *plan;
instr_time planstart,
planduration;

INSTR_TIME_SET_CURRENT(planstart);

@ -493,7 +494,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,

if (es->costs && planduration)
{
double plantime = INSTR_TIME_GET_DOUBLE(*planduration);

if (es->format == EXPLAIN_FORMAT_TEXT)
appendStringInfo(es->str, "Planning time: %.3f ms\n",
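
The measurement in these two hunks uses the INSTR_TIME macros from portability/instr_time.h. Pieced together, the pattern is roughly this (a sketch; the real code passes planduration down to ExplainOnePlan as a pointer, and the pg_plan_query arguments are abbreviated):

    instr_time  planstart,
                planduration;

    INSTR_TIME_SET_CURRENT(planstart);
    plan = pg_plan_query(query, 0, params);          /* do the planning */
    INSTR_TIME_SET_CURRENT(planduration);
    INSTR_TIME_SUBTRACT(planduration, planstart);    /* planduration -= planstart */

    /* later, when es->costs is set: */
    double      plantime = INSTR_TIME_GET_DOUBLE(planduration);

    appendStringInfo(es->str, "Planning time: %.3f ms\n", 1000.0 * plantime);
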
@ -542,7 +543,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
* convert a QueryDesc's plan tree to text and append it to es->str
*
* The caller should have set up the options fields of *es, as well as
* initializing the output buffer es->str. Other fields in *es are
* initialized here.
*
* NB: will not work on utility statements
@ -567,7 +568,7 @@ ExplainPrintPlan(ExplainState *es, QueryDesc *queryDesc)
* es->str
*
* The caller should have set up the options fields of *es, as well as
* initializing the output buffer es->str. Other fields in *es are
* initialized here.
*/
void
@ -2193,7 +2194,7 @@ show_modifytable_info(ModifyTableState *mtstate, ExplainState *es)

/*
* If the first target relation is a foreign table, call its FDW to
* display whatever additional fields it wants to. For now, we ignore the
* possibility of other targets being foreign tables, although the API for
* ExplainForeignModify is designed to allow them to be processed.
*/
@ -2692,7 +2693,7 @@ ExplainXMLTag(const char *tagname, int flags, ExplainState *es)
/*
* Emit a JSON line ending.
*
* JSON requires a comma after each property but the last. To facilitate this,
* in JSON format, the text emitted for each property begins just prior to the
* preceding line-break (and comma, if applicable).
*/
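
The comma-before-property trick described here avoids any look-ahead for the last property. Outside the backend it reduces to something like this standalone sketch:

    #include <stdio.h>
    #include <stdbool.h>

    /* Emit ",\n" before every property except the first, so the final
     * property needs no special-casing. */
    static void
    json_property(FILE *out, bool *first, const char *name, const char *value)
    {
        if (*first)
            *first = false;
        else
            fputc(',', out);
        fprintf(out, "\n  \"%s\": %s", name, value);
    }

The caller prints the opening brace, calls json_property() for each field, then prints a final newline and the closing brace.
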
@ -2713,7 +2714,7 @@ ExplainJSONLineEnding(ExplainState *es)
* YAML lines are ordinarily indented by two spaces per indentation level.
* The text emitted for each property begins just prior to the preceding
* line-break, except for the first property in an unlabelled group, for which
* it begins immediately after the "- " that introduces the group. The first
* property of the group appears on the same line as the opening "- ".
*/
static void

@ -108,7 +108,7 @@ static void ApplyExtensionUpdates(Oid extensionOid,
/*
* get_extension_oid - given an extension name, look up the OID
*
* If missing_ok is false, throw an error if extension name not found. If
* true, just return InvalidOid.
*/
Oid
@ -257,9 +257,9 @@ check_valid_extension_name(const char *extensionname)
errdetail("Extension names must not contain \"--\".")));

/*
* No leading or trailing dash either. (We could probably allow this, but
* it would require much care in filename parsing and would make filenames
* visually if not formally ambiguous. Since there's no real-world use
* case, let's just forbid it.)
*/
if (extensionname[0] == '-' || extensionname[namelen - 1] == '-')
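
Condensed into a standalone predicate, the two checks quoted above look roughly like this (a sketch; the backend reports each failure through ereport() instead of returning false):

    #include <stdbool.h>
    #include <string.h>

    static bool
    extension_name_ok(const char *name)
    {
        size_t      len = strlen(name);

        if (len == 0)
            return false;
        if (strstr(name, "--") != NULL)     /* would confuse script filename parsing */
            return false;
        if (name[0] == '-' || name[len - 1] == '-')
            return false;                   /* keep filenames unambiguous */
        return true;
    }
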
@ -435,7 +435,7 @@ get_extension_script_filename(ExtensionControlFile *control,

/*
* Parse contents of primary or auxiliary control file, and fill in
* fields of *control. We parse primary file if version == NULL,
* else the optional auxiliary file for that version.
*
* Control files are supposed to be very short, half a dozen lines,
@ -673,7 +673,7 @@ read_extension_script_file(const ExtensionControlFile *control,
* filename is used only to report errors.
*
* Note: it's tempting to just use SPI to execute the string, but that does
* not work very well. The really serious problem is that SPI will parse,
* analyze, and plan the whole string before executing any of it; of course
* this fails if there are any plannable statements referring to objects
* created earlier in the script. A lesser annoyance is that SPI insists
@ -848,7 +848,7 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
/*
* Set creating_extension and related variables so that
* recordDependencyOnCurrentExtension and other functions do the right
* things. On failure, ensure we reset these variables.
*/
creating_extension = true;
CurrentExtensionObject = extensionOid;
@ -1092,7 +1092,7 @@ identify_update_path(ExtensionControlFile *control,
* is still good.
*
* Result is a List of names of versions to transition through (the initial
* version is *not* included). Returns NIL if no such path.
*/
static List *
find_update_path(List *evi_list,
@ -1193,7 +1193,7 @@ CreateExtension(CreateExtensionStmt *stmt)
check_valid_extension_name(stmt->extname);

/*
* Check for duplicate extension name. The unique index on
* pg_extension.extname would catch this anyway, and serves as a backstop
* in case of race conditions; but this is a friendlier error message, and
* besides we need a check to support IF NOT EXISTS.
@ -1360,7 +1360,7 @@ CreateExtension(CreateExtensionStmt *stmt)
{
/*
* The extension is not relocatable and the author gave us a schema
* for it. We create the schema here if it does not already exist.
*/
schemaName = control->schema;
schemaOid = get_namespace_oid(schemaName, true);
@ -1390,7 +1390,7 @@ CreateExtension(CreateExtensionStmt *stmt)
*/
List *search_path = fetch_search_path(false);

if (search_path == NIL) /* nothing valid in search_path? */
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_SCHEMA),
errmsg("no schema has been selected to create in")));
@ -1589,7 +1589,7 @@ RemoveExtensionById(Oid extId)
* might write "DROP EXTENSION foo" in foo's own script files, as because
* errors in dependency management in extension script files could give
* rise to cases where an extension is dropped as a result of recursing
* from some contained object. Because of that, we must test for the case
* here, not at some higher level of the DROP EXTENSION command.
*/
if (extId == CurrentExtensionObject)
@ -1620,7 +1620,7 @@ RemoveExtensionById(Oid extId)

/*
* This function lists the available extensions (one row per primary control
* file in the control directory). We parse each control file and report the
* interesting fields.
*
* The system view pg_available_extensions provides a user interface to this
@ -1729,7 +1729,7 @@ pg_available_extensions(PG_FUNCTION_ARGS)

/*
* This function lists the available extension versions (one row per
* extension installation script). For each version, we parse the related
* control file(s) and report the interesting fields.
*
* The system view pg_available_extension_versions provides a user interface
@ -2517,7 +2517,7 @@ AlterExtensionNamespace(List *names, const char *newschema)
Oid dep_oldNspOid;

/*
* Ignore non-membership dependencies. (Currently, the only other
* case we could see here is a normal dependency from another
* extension.)
*/
@ -2929,7 +2929,7 @@ ExecAlterExtensionContentsStmt(AlterExtensionContentsStmt *stmt)

/*
* Prevent a schema from being added to an extension if the schema
* contains the extension. That would create a dependency loop.
*/
if (object.classId == NamespaceRelationId &&
object.objectId == get_extension_schema(extension.objectId))

@ -81,7 +81,7 @@ optionListToArray(List *options)


/*
* Transform a list of DefElem into text array format. This is substantially
* the same thing as optionListToArray(), except we recognize SET/ADD/DROP
* actions for modifying an existing list of options, which is passed in
* Datum form as oldOptions. Also, if fdwvalidator isn't InvalidOid
@ -125,7 +125,7 @@ transformGenericOptions(Oid catalogId,

/*
* It is possible to perform multiple SET/DROP actions on the same
* option. The standard permits this, as long as the options to be
* added are unique. Note that an unspecified action is taken to be
* ADD.
*/
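
The SET/ADD/DROP merge semantics described above can be pictured with ordinary arrays (a standalone sketch; the backend works on List/DefElem nodes, and it raises an error for a SET or DROP of an option that is absent, where this toy version is forgiving):

    #include <stdio.h>
    #include <string.h>

    typedef enum { GOPT_ADD, GOPT_SET, GOPT_DROP } GenericOptionAction;

    /* Apply one action to n "name -> value" pairs; returns the new count. */
    static int
    apply_option(char names[][32], char values[][32], int n,
                 GenericOptionAction action, const char *name, const char *value)
    {
        int         i;

        for (i = 0; i < n; i++)
            if (strcmp(names[i], name) == 0)
                break;

        if (action == GOPT_DROP)
        {
            if (i < n)
            {
                /* close the gap left by the dropped option */
                memmove(names[i], names[i + 1], (n - i - 1) * sizeof(names[0]));
                memmove(values[i], values[i + 1], (n - i - 1) * sizeof(values[0]));
                n--;
            }
            return n;
        }

        /* ADD (also the default when no action was given) or SET */
        if (i == n)
            n++;
        snprintf(names[i], sizeof(names[0]), "%s", name);
        snprintf(values[i], sizeof(values[0]), "%s", value);
        return n;
    }
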
@ -74,7 +74,7 @@
* allow a shell type to be used, or even created if the specified return type
* doesn't exist yet. (Without this, there's no way to define the I/O procs
* for a new type.) But SQL function creation won't cope, so error out if
* the target language is SQL. (We do this here, not in the SQL-function
* validator, so as not to produce a NOTICE and then an ERROR for the same
* condition.)
*/
@ -451,7 +451,7 @@ interpret_function_parameter_list(List *parameters,
* FUNCTION and ALTER FUNCTION and return it via one of the out
* parameters. Returns true if the passed option was recognized. If
* the out parameter we were going to assign to points to non-NULL,
* raise a duplicate-clause error. (We don't try to detect duplicate
* SET parameters though --- if you're redundant, the last one wins.)
*/
static bool
@ -760,7 +760,7 @@ interpret_AS_clause(Oid languageOid, const char *languageName,
{
/*
* For "C" language, store the file name in probin and, when given,
* the link symbol name in prosrc. If link symbol is omitted,
* substitute procedure name. We also allow link symbol to be
* specified as "-", since that was the habit in PG versions before
* 8.4, and there might be dump files out there that don't translate
@ -1394,7 +1394,7 @@ CreateCast(CreateCastStmt *stmt)
/*
* Restricting the volatility of a cast function may or may not be a
* good idea in the abstract, but it definitely breaks many old
* user-defined types. Disable this check --- tgl 2/1/03
*/
#ifdef NOT_USED
if (procstruct->provolatile == PROVOLATILE_VOLATILE)
@ -1458,7 +1458,7 @@ CreateCast(CreateCastStmt *stmt)

/*
* We know that composite, enum and array types are never binary-
* compatible with each other. They all have OIDs embedded in them.
*
* Theoretically you could build a user-defined base type that is
* binary-compatible with a composite, enum, or array type. But we
@ -1487,7 +1487,7 @@ CreateCast(CreateCastStmt *stmt)
* We also disallow creating binary-compatibility casts involving
* domains. Casting from a domain to its base type is already
* allowed, and casting the other way ought to go through domain
* coercion to permit constraint checking. Again, if you're intent on
* having your own semantics for that, create a no-op cast function.
*
* NOTE: if we were to relax this, the above checks for composites

@ -102,7 +102,7 @@ static void RangeVarCallbackForReindexIndex(const RangeVar *relation,
* concrete benefit for core types.

* When a comparison or exclusion operator has a polymorphic input type, the
* actual input types must also match. This defends against the possibility
* that operators could vary behavior in response to get_fn_expr_argtype().
* At present, this hazard is theoretical: check_exclusion_constraint() and
* all core index access methods decline to set fn_expr for such calls.
@ -349,11 +349,11 @@ DefineIndex(Oid relationId,
* index build; but for concurrent builds we allow INSERT/UPDATE/DELETE
* (but not VACUUM).
*
* NB: Caller is responsible for making sure that relationId refers
* to the relation on which the index should be built; except in bootstrap
* mode, this will typically require the caller to have already locked
* the relation. To avoid lock upgrade hazards, that lock should be at
* least as strong as the one we take here.
* NB: Caller is responsible for making sure that relationId refers to the
* relation on which the index should be built; except in bootstrap mode,
* this will typically require the caller to have already locked the
* relation. To avoid lock upgrade hazards, that lock should be at least
* as strong as the one we take here.
*/
lockmode = stmt->concurrent ? ShareUpdateExclusiveLock : ShareLock;
rel = heap_open(relationId, lockmode);
@ -433,7 +433,7 @@ DefineIndex(Oid relationId,
}

/*
* Force shared indexes into the pg_global tablespace. This is a bit of a
* hack but seems simpler than marking them in the BKI commands. On the
* other hand, if it's not shared, don't allow it to be placed there.
*/
@ -628,7 +628,7 @@ DefineIndex(Oid relationId,
/*
* For a concurrent build, it's important to make the catalog entries
* visible to other transactions before we start to build the index. That
* will prevent them from making incompatible HOT updates. The new index
* will be marked not indisready and not indisvalid, so that no one else
* tries to either insert into it or use it for queries.
*
@ -676,7 +676,7 @@ DefineIndex(Oid relationId,
* indexes. We have waited out all the existing transactions and any new
* transaction will have the new index in its list, but the index is still
* marked as "not-ready-for-inserts". The index is consulted while
* deciding HOT-safety though. This arrangement ensures that no new HOT
* chains can be created where the new tuple and the old tuple in the
* chain have different index keys.
*
@ -736,7 +736,7 @@ DefineIndex(Oid relationId,

/*
* Now take the "reference snapshot" that will be used by validate_index()
* to filter candidate tuples. Beware! There might still be snapshots in
* use that treat some transaction as in-progress that our reference
* snapshot treats as committed. If such a recently-committed transaction
* deleted tuples in the table, we will not include them in the index; yet
@ -761,7 +761,7 @@ DefineIndex(Oid relationId,
* Drop the reference snapshot. We must do this before waiting out other
* snapshot holders, else we will deadlock against other processes also
* doing CREATE INDEX CONCURRENTLY, which would see our snapshot as one
* they must wait for. But first, save the snapshot's xmin to use as
* limitXmin for GetCurrentVirtualXIDs().
*/
limitXmin = snapshot->xmin;
@ -771,7 +771,7 @@ DefineIndex(Oid relationId,

/*
* The index is now valid in the sense that it contains all currently
* interesting tuples. But since it might not contain tuples deleted just
* before the reference snap was taken, we have to wait out any
* transactions that might have older snapshots. Obtain a list of VXIDs
* of such transactions, and wait for them individually.
@ -786,7 +786,7 @@ DefineIndex(Oid relationId,
*
* We can also exclude autovacuum processes and processes running manual
* lazy VACUUMs, because they won't be fazed by missing index entries
* either. (Manual ANALYZEs, however, can't be excluded because they
* might be within transactions that are going to do arbitrary operations
* later.)
*
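
The wait itself is a loop over the virtual transaction IDs that GetCurrentVirtualXIDs() returns; its essential shape is the following (a sketch of the 9.4 logic, which additionally re-polls the list):

    VirtualTransactionId *old_snapshots;
    int         n_old_snapshots,
                i;

    old_snapshots = GetCurrentVirtualXIDs(limitXmin, true, false,
                                          PROC_IS_AUTOVACUUM | PROC_IN_VACUUM,
                                          &n_old_snapshots);

    for (i = 0; i < n_old_snapshots; i++)
    {
        /* block until that transaction is gone */
        VirtualXactLock(old_snapshots[i], true);
    }
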
@ -875,7 +875,7 @@ CheckMutability(Expr *expr)
{
/*
* First run the expression through the planner. This has a couple of
* important consequences. First, function default arguments will get
* inserted, which may affect volatility (consider "default now()").
* Second, inline-able functions will get inlined, which may allow us to
* conclude that the function is really less volatile than it's marked. As
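
Stripped of its commentary, the check described here is a two-step pipeline (a sketch matching the shape of the 9.4 function):

    static bool
    CheckMutability(Expr *expr)
    {
        /* planner pass: inserts defaults, inlines inline-able functions */
        expr = expression_planner(expr);

        /* true if any mutable function call survives */
        return contain_mutable_functions((Node *) expr);
    }
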
@ -898,7 +898,7 @@ CheckMutability(Expr *expr)
* Checks that the given partial-index predicate is valid.
*
* This used to also constrain the form of the predicate to forms that
* indxpath.c could do something with. However, that seems overly
* restrictive. One useful application of partial indexes is to apply
* a UNIQUE constraint across a subset of a table, and in that scenario
* any evaluatable predicate will work. So accept any predicate here
@ -1009,7 +1009,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
attcollation = exprCollation(expr);

/*
* Strip any top-level COLLATE clause. This ensures that we treat
* "x COLLATE y" and "(x COLLATE y)" alike.
*/
while (IsA(expr, CollateExpr))
@ -1215,7 +1215,7 @@ GetIndexOpClass(List *opclass, Oid attrType,
* 2000/07/30
*
* Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that
* too for awhile. I'm starting to think we need a better approach. tgl
* 2000/10/01
*
* Release 8.0 removes bigbox_ops (which was dead code for a long while
@ -1284,7 +1284,7 @@ GetIndexOpClass(List *opclass, Oid attrType,
NameListToString(opclass), accessMethodName)));

/*
* Verify that the index operator class accepts this datatype. Note we
* will accept binary compatibility.
*/
opClassId = HeapTupleGetOid(tuple);
@ -1305,7 +1305,7 @@ GetIndexOpClass(List *opclass, Oid attrType,
* GetDefaultOpClass
*
* Given the OIDs of a datatype and an access method, find the default
* operator class, if any. Returns InvalidOid if there is none.
*/
Oid
GetDefaultOpClass(Oid type_id, Oid am_id)
@ -1400,7 +1400,7 @@ GetDefaultOpClass(Oid type_id, Oid am_id)
* Create a name for an implicitly created index, sequence, constraint, etc.
*
* The parameters are typically: the original table name, the original field
* name, and a "type" string (such as "seq" or "pkey"). The field name
* and/or type can be NULL if not relevant.
*
* The result is a palloc'd string.
@ -1408,7 +1408,7 @@ GetDefaultOpClass(Oid type_id, Oid am_id)
* The basic result we want is "name1_name2_label", omitting "_name2" or
* "_label" when those parameters are NULL. However, we must generate
* a name with less than NAMEDATALEN characters! So, we truncate one or
* both names if necessary to make a short-enough string. The label part
* is never truncated (so it had better be reasonably short).
*
* The caller is responsible for checking uniqueness of the generated
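
The truncation scheme this comment lays out, as standalone C (an illustration under the stated rules; the backend version also avoids cutting multibyte characters in half):

    #include <stdio.h>
    #include <string.h>

    #define NAMEDATALEN 64              /* PostgreSQL's default limit */

    static void
    make_object_name(char *dst, size_t dstlen,
                     const char *name1, const char *name2, const char *label)
    {
        size_t      len1 = strlen(name1);
        size_t      len2 = name2 ? strlen(name2) : 0;
        size_t      fixed = strlen(label) + (name2 ? 2 : 1);   /* '_' separators */

        /* shave the longer name until "name1_name2_label" fits */
        while (len1 + len2 + fixed >= NAMEDATALEN && (len1 > 1 || len2 > 1))
        {
            if (len1 > len2)
                len1--;
            else
                len2--;
        }

        if (name2)
            snprintf(dst, dstlen, "%.*s_%.*s_%s",
                     (int) len1, name1, (int) len2, name2, label);
        else
            snprintf(dst, dstlen, "%.*s_%s", (int) len1, name1, label);
    }
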
@ -1603,7 +1603,7 @@ ChooseIndexNameAddition(List *colnames)

/*
* Select the actual names to be used for the columns of an index, given the
* list of IndexElems for the columns. This is mostly about ensuring the
* names are unique so we don't get a conflicting-attribute-names error.
*
* Returns a List of plain strings (char *, not String nodes).
@ -1714,7 +1714,7 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation,
/*
* If the relation does exist, check whether it's an index. But note that
* the relation might have been dropped between the time we did the name
* lookup and now. In that case, there's nothing to do.
*/
relkind = get_rel_relkind(relId);
if (!relkind)
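
So the callback's dropped-concurrently case is simply an early return (a sketch of the surrounding logic; the exact error text is illustrative):

    relkind = get_rel_relkind(relId);
    if (!relkind)
        return;                     /* concurrently dropped: nothing to check */
    if (relkind != RELKIND_INDEX)
        ereport(ERROR,
                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                 errmsg("\"%s\" is not an index", relation->relname)));
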
@ -240,9 +240,9 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString,
owner = matviewRel->rd_rel->relowner;

/*
* Create the transient table that will receive the regenerated data.
* Lock it against access by any other process until commit (by which time
* it will be gone).
* Create the transient table that will receive the regenerated data. Lock
* it against access by any other process until commit (by which time it
* will be gone).
*/
OIDNewHeap = make_new_heap(matviewOid, tableSpace, concurrent,
ExclusiveLock);
@ -319,7 +319,7 @@ refresh_matview_datafill(DestReceiver *dest, Query *query,

/*
* Use a snapshot with an updated command ID to ensure this query sees
* results of any previously executed queries. (This could only matter if
* the planner executed an allegedly-stable function that changed the
* database contents, but let's do it anyway to be safe.)
*/
@ -495,9 +495,9 @@ mv_GenerateOper(StringInfo buf, Oid opoid)
*
* This is called after a new version of the data has been created in a
* temporary table. It performs a full outer join against the old version of
* the data, producing "diff" results. This join cannot work if there are any
* duplicated rows in either the old or new versions, in the sense that every
* column would compare as equal between the two rows. It does work correctly
* in the face of rows which have at least one NULL value, with all non-NULL
* columns equal. The behavior of NULLs on equality tests and on UNIQUE
* indexes turns out to be quite convenient here; the tests we need to make
@ -561,7 +561,7 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid)

/*
* We need to ensure that there are not duplicate rows without NULLs in
* the new data set before we can count on the "diff" results. Check for
* that in a way that allows showing the first duplicated row found. Even
* after we pass this test, a unique index on the materialized view may
* find a duplicate key problem.
@ -707,7 +707,7 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid)
/* Deletes must come before inserts; do them first. */
resetStringInfo(&querybuf);
appendStringInfo(&querybuf,
"DELETE FROM %s mv WHERE ctid OPERATOR(pg_catalog.=) ANY "
"(SELECT diff.tid FROM %s diff "
"WHERE diff.tid IS NOT NULL "
"AND diff.newdata IS NULL)",

@ -391,7 +391,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
* A minimum expectation therefore is that the caller have execute
* privilege with grant option. Since we don't have a way to make the
* opclass go away if the grant option is revoked, we choose instead to
* require ownership of the functions. It's also not entirely clear what
* permissions should be required on the datatype, but ownership seems
* like a safe choice.
*
@ -673,7 +673,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
opclassoid, procedures, false);

/*
* Create dependencies for the opclass proper. Note: we do not create a
* dependency link to the AM, because we don't currently support DROP
* ACCESS METHOD.
*/
@ -1090,7 +1090,7 @@ assignOperTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
if (OidIsValid(member->sortfamily))
{
/*
* Ordering op, check index supports that. (We could perhaps also
* check that the operator returns a type supported by the sortfamily,
* but that seems more trouble than it's worth here. If it does not,
* the operator will never be matchable to any ORDER BY clause, but no
@ -1219,7 +1219,7 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)

/*
* The default in CREATE OPERATOR CLASS is to use the class' opcintype as
* lefttype and righttype. In CREATE or ALTER OPERATOR FAMILY, opcintype
* isn't available, so make the user specify the types.
*/
if (!OidIsValid(member->lefttype))

@ -211,7 +211,7 @@ DefineOperator(List *names, List *parameters)
functionOid = LookupFuncName(functionName, nargs, typeId, false);

/*
* We require EXECUTE rights for the function. This isn't strictly
* necessary, since EXECUTE will be checked at any attempted use of the
* operator, but it seems like a good idea anyway.
*/

@ -4,7 +4,7 @@
* Utility commands affecting portals (that is, SQL cursor commands)
*
* Note: see also tcop/pquery.c, which implements portal operations for
* the FE/BE protocol. This module uses pquery.c for some operations.
* And both modules depend on utils/mmgr/portalmem.c, which controls
* storage management for portals (but doesn't run any queries in them).
*
@ -89,7 +89,7 @@ PerformCursorOpen(PlannedStmt *stmt, ParamListInfo params,

/*----------
* Also copy the outer portal's parameter list into the inner portal's
* memory context. We want to pass down the parameter values in case we
* had a command like
* DECLARE c CURSOR FOR SELECT ... WHERE foo = $1
* This will have been parsed using the outer parameter set and the
@ -106,7 +106,7 @@ PerformCursorOpen(PlannedStmt *stmt, ParamListInfo params,
*
* If the user didn't specify a SCROLL type, allow or disallow scrolling
* based on whether it would require any additional runtime overhead to do
* so. Also, we disallow scrolling for FOR UPDATE cursors.
*/
portal->cursorOptions = cstmt->options;
if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
@ -365,7 +365,7 @@ PersistHoldablePortal(Portal portal)
ExecutorRewind(queryDesc);

/*
* Change the destination to output to the tuplestore. Note we tell
* the tuplestore receiver to detoast all data passed through it.
*/
queryDesc->dest = CreateDestReceiver(DestTuplestore);

@ -174,7 +174,7 @@ PrepareQuery(PrepareStmt *stmt, const char *queryString)
* ExecuteQuery --- implement the 'EXECUTE' utility statement.
*
* This code also supports CREATE TABLE ... AS EXECUTE. That case is
* indicated by passing a non-null intoClause. The DestReceiver is already
* set up correctly for CREATE TABLE AS, but we still have to make a few
* other adjustments here.
*
@ -211,7 +211,7 @@ ExecuteQuery(ExecuteStmt *stmt, IntoClause *intoClause,
{
/*
* Need an EState to evaluate parameters; must not delete it till end
* of query, in case parameters are pass-by-reference. Note that the
* passed-in "params" could possibly be referenced in the parameter
* expressions.
*/
@ -237,7 +237,7 @@ ExecuteQuery(ExecuteStmt *stmt, IntoClause *intoClause,
/*
* For CREATE TABLE ... AS EXECUTE, we must verify that the prepared
* statement is one that produces tuples. Currently we insist that it be
* a plain old SELECT. In future we might consider supporting other
* things such as INSERT ... RETURNING, but there are a couple of issues
* to be settled first, notably how WITH NO DATA should be handled in such
* a case (do we really want to suppress execution?) and how to pass down
@ -529,7 +529,7 @@ FetchPreparedStatementResultDesc(PreparedStatement *stmt)

/*
* Given a prepared statement that returns tuples, extract the query
* targetlist. Returns NIL if the statement doesn't have a determinable
* targetlist.
*
* Note: this is pretty ugly, but since it's only used in corner cases like
@ -644,7 +644,7 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es,
{
/*
* Need an EState to evaluate parameters; must not delete it till end
* of query, in case parameters are pass-by-reference. Note that the
* passed-in "params" could possibly be referenced in the parameter
* expressions.
*/

@ -260,7 +260,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
if (funcrettype != LANGUAGE_HANDLEROID)
{
/*
* We allow OPAQUE just so we can load old dump files. When we
* see a handler function declared OPAQUE, change it to
* LANGUAGE_HANDLER. (This is probably obsolete and removable?)
*/

@ -67,7 +67,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
* To create a schema, must have schema-create privilege on the current
* database and must be able to become the target role (this does not
* imply that the target role itself must have create-schema privilege).
* The latter provision guards against "giveaway" attacks. Note that a
* superuser will always have both of these privileges a fortiori.
*/
aclresult = pg_database_aclcheck(MyDatabaseId, saved_uid, ACL_CREATE);
@ -132,7 +132,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
/*
* Examine the list of commands embedded in the CREATE SCHEMA command, and
* reorganize them into a sequentially executable order with no forward
* references. Note that the result is still a list of raw parsetrees ---
* we cannot, in general, run parse analysis on one statement until we
* have actually executed the prior ones.
*/

@ -279,7 +279,7 @@ ResetSequence(Oid seq_relid)
seq->log_cnt = 0;

/*
* Create a new storage file for the sequence. We want to keep the
* sequence's relfrozenxid at 0, since it won't contain any unfrozen XIDs.
* Same with relminmxid, since a sequence will never contain multixacts.
*/
@ -325,9 +325,9 @@ fill_seq_with_data(Relation rel, HeapTuple tuple)
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);

/*
* Since VACUUM does not process sequences, we have to force the tuple
* to have xmin = FrozenTransactionId now. Otherwise it would become
* invisible to SELECTs after 2G transactions. It is okay to do this
* Since VACUUM does not process sequences, we have to force the tuple to
* have xmin = FrozenTransactionId now. Otherwise it would become
* invisible to SELECTs after 2G transactions. It is okay to do this
* because if the current transaction aborts, no other xact will ever
* examine the sequence tuple anyway.
*/
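
Forcing the tuple frozen amounts to stamping it before insertion, roughly as follows (a sketch of the relevant macro calls; the function sets a few more header fields):

    HeapTupleHeaderSetXmin(tuple->t_data, FrozenTransactionId);
    HeapTupleHeaderSetCmin(tuple->t_data, FirstCommandId);
    HeapTupleHeaderSetXmax(tuple->t_data, InvalidTransactionId);
    tuple->t_data->t_infomask |= HEAP_XMAX_INVALID;
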
@ -487,7 +487,7 @@ nextval(PG_FUNCTION_ARGS)
* XXX: This is not safe in the presence of concurrent DDL, but acquiring
* a lock here is more expensive than letting nextval_internal do it,
* since the latter maintains a cache that keeps us from hitting the lock
* manager more than once per transaction. It's not clear whether the
* performance penalty is material in practice, but for now, we do it this
* way.
*/
@ -567,7 +567,7 @@ nextval_internal(Oid relid)
}

/*
* Decide whether we should emit a WAL log record. If so, force up the
* fetch count to grab SEQ_LOG_VALS more values than we actually need to
* cache. (These will then be usable without logging.)
*
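
In code, that decision is a comparison against the number of values still covered by the last WAL record (a condensed sketch of the nextval_internal logic):

    log = seq->log_cnt;             /* values still covered by the last WAL record */

    if (log < fetch || !seq->is_called)
    {
        /* pre-logged values exhausted: log again, reserving extras */
        fetch = log = fetch + SEQ_LOG_VALS;
        logit = true;
    }
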
@ -674,7 +674,7 @@ nextval_internal(Oid relid)
* We must mark the buffer dirty before doing XLogInsert(); see notes in
* SyncOneBuffer(). However, we don't apply the desired changes just yet.
* This looks like a violation of the buffer update protocol, but it is in
* fact safe because we hold exclusive lock on the buffer. Any other
* process, including a checkpoint, that tries to examine the buffer
* contents will block until we release the lock, and then will see the
* final state that we install below.
@ -936,7 +936,7 @@ setval3_oid(PG_FUNCTION_ARGS)
* Open the sequence and acquire AccessShareLock if needed
*
* If we haven't touched the sequence already in this transaction,
* we need to acquire AccessShareLock. We arrange for the lock to
* be owned by the top transaction, so that we don't need to do it
* more than once per xact.
*/
@ -1037,7 +1037,7 @@ init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel)

/*
* If the sequence has been transactionally replaced since we last saw it,
* discard any cached-but-unissued values. We do not touch the currval()
* state, however.
*/
if (seqrel->rd_rel->relfilenode != elm->filenode)
@ -1554,13 +1554,13 @@ seq_redo(XLogRecPtr lsn, XLogRecord *record)
page = (Page) BufferGetPage(buffer);

/*
* We always reinit the page. However, since this WAL record type is
* also used for updating sequences, it's possible that a hot-standby
* backend is examining the page concurrently; so we mustn't transiently
* trash the buffer. The solution is to build the correct new page
* contents in local workspace and then memcpy into the buffer. Then only
* bytes that are supposed to change will change, even transiently. We
* must palloc the local page for alignment reasons.
* We always reinit the page. However, since this WAL record type is also
* used for updating sequences, it's possible that a hot-standby backend
* is examining the page concurrently; so we mustn't transiently trash the
* buffer. The solution is to build the correct new page contents in
* local workspace and then memcpy into the buffer. Then only bytes that
* are supposed to change will change, even transiently. We must palloc
* the local page for alignment reasons.
*/
localpage = (Page) palloc(BufferGetPageSize(buffer));

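
The build-aside-then-memcpy technique continues like this (a sketch of the remainder of the redo routine; details such as the special-space size are abbreviated):

    localpage = (Page) palloc(BufferGetPageSize(buffer));

    /* assemble the complete new page contents off to the side ... */
    PageInit(localpage, BufferGetPageSize(buffer), sizeof(sequence_magic));
    /* ... restore the sequence tuple into localpage here ... */

    /* ... then install it over the shared buffer in one pass */
    memcpy(page, localpage, BufferGetPageSize(buffer));
    PageSetLSN(page, lsn);
    MarkBufferDirty(buffer);
    pfree(localpage);
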
@ -276,7 +276,7 @@ static void AlterSeqNamespaces(Relation classRel, Relation rel,
Oid oldNspOid, Oid newNspOid, ObjectAddresses *objsMoved,
LOCKMODE lockmode);
static void ATExecAlterConstraint(Relation rel, AlterTableCmd *cmd,
bool recurse, bool recursing, LOCKMODE lockmode);
static void ATExecValidateConstraint(Relation rel, char *constrName,
bool recurse, bool recursing, LOCKMODE lockmode);
static int transformColumnNameList(Oid relId, List *colList,
@ -557,7 +557,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId)
&inheritOids, &old_constraints, &parentOidCount);

/*
* Create a tuple descriptor from the relation schema. Note that this
* deals with column names, types, and NOT NULL constraints, but not
* default values or CHECK constraints; we handle those below.
*/
@ -657,7 +657,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId)
CommandCounterIncrement();

/*
* Open the new relation and acquire exclusive lock on it. This isn't
* really necessary for locking out other backends (since they can't see
* the new rel anyway until we commit), but it keeps the lock manager from
* complaining about deadlock risks.
@ -702,7 +702,7 @@ DropErrorMsgNonExistent(RangeVar *rel, char rightkind, bool missing_ok)
{
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_SCHEMA),
errmsg("schema \"%s\" does not exist", rel->schemaname)));
}
else
{
@ -1022,10 +1022,10 @@ ExecuteTruncate(TruncateStmt *stmt)
}

/*
* In CASCADE mode, suck in all referencing relations as well. This
* requires multiple iterations to find indirectly-dependent relations. At
* each phase, we need to exclusive-lock new rels before looking for their
* dependencies, else we might miss something. Also, we check each rel as
* soon as we open it, to avoid a faux pas such as holding lock for a long
* time on a rel we have no permissions for.
*/
@ -1246,7 +1246,7 @@ ExecuteTruncate(TruncateStmt *stmt)
}

/*
* Check that a given rel is safe to truncate. Subroutine for ExecuteTruncate
*/
static void
truncate_check_rel(Relation rel)
@ -1674,7 +1674,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,

/*
* Now copy the CHECK constraints of this parent, adjusting attnos
* using the completed newattno[] map. Identically named constraints
* are merged if possible, else we throw error.
*/
if (constr && constr->num_check > 0)
@ -1735,7 +1735,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,

/*
* Close the parent rel, but keep our AccessShareLock on it until xact
* commit. That will prevent someone else from deleting or ALTERing
* the parent before the child is committed.
*/
heap_close(relation, NoLock);
@ -2243,7 +2243,7 @@ renameatt_internal(Oid myrelid,
oldattname)));

/*
* if the attribute is inherited, forbid the renaming. if this is a
* top-level call to renameatt(), then expected_parents will be 0, so the
* effect of this code will be to prohibit the renaming if the attribute
* is inherited at all. if this is a recursive call to renameatt(),
@ -2547,7 +2547,7 @@ RenameRelationInternal(Oid myrelid, const char *newrelname, bool is_internal)
newrelname)));

/*
* Update pg_class tuple with new relname. (Scribbling on reltup is OK
* because it's a copy...)
*/
namestrcpy(&(relform->relname), newrelname);
@ -2603,7 +2603,7 @@ RenameRelationInternal(Oid myrelid, const char *newrelname, bool is_internal)
* We also reject these commands if there are any pending AFTER trigger events
* for the rel. This is certainly necessary for the rewriting variants of
* ALTER TABLE, because they don't preserve tuple TIDs and so the pending
* events would try to fetch the wrong tuples. It might be overly cautious
* in other cases, but again it seems better to err on the side of paranoia.
*
* REINDEX calls this with "rel" referencing the index to be rebuilt; here
@ -2659,23 +2659,23 @@ AlterTableLookupRelation(AlterTableStmt *stmt, LOCKMODE lockmode)
* 3. Scan table(s) to check new constraints, and optionally recopy
* the data into new table(s).
* Phase 3 is not performed unless one or more of the subcommands requires
* it. The intention of this design is to allow multiple independent
* updates of the table schema to be performed with only one pass over the
* data.
*
* ATPrepCmd performs phase 1. A "work queue" entry is created for
* each table to be affected (there may be multiple affected tables if the
* commands traverse a table inheritance hierarchy). Also we do preliminary
* validation of the subcommands, including parse transformation of those
* expressions that need to be evaluated with respect to the old table
* schema.
*
* ATRewriteCatalogs performs phase 2 for each affected table. (Note that
* phases 2 and 3 normally do no explicit recursion, since phase 1 already
* did it --- although some subcommands have to recurse in phase 2 instead.)
* Certain subcommands need to be performed before others to avoid
* unnecessary conflicts; for example, DROP COLUMN should come before
* ADD COLUMN. Therefore phase 1 divides the subcommands into multiple
* lists, one for each logical "pass" of phase 2.
*
* ATRewriteTables performs phase 3 for those tables that need it.
@ -2782,17 +2782,18 @@ AlterTableGetLockLevel(List *cmds)
* to SELECT */
case AT_SetTableSpace: /* must rewrite heap */
case AT_AlterColumnType: /* must rewrite heap */
case AT_AddOids: /* must rewrite heap */
cmd_lockmode = AccessExclusiveLock;
break;

/*
* These subcommands may require addition of toast tables. If we
* add a toast table to a table currently being scanned, we
* These subcommands may require addition of toast tables. If
* we add a toast table to a table currently being scanned, we
* might miss data added to the new toast table by concurrent
* insert transactions.
*/
case AT_SetStorage: /* may add toast tables, see ATRewriteCatalogs() */
case AT_SetStorage:/* may add toast tables, see
* ATRewriteCatalogs() */
cmd_lockmode = AccessExclusiveLock;
break;

@ -2808,12 +2809,12 @@ AlterTableGetLockLevel(List *cmds)
/*
* Subcommands that may be visible to concurrent SELECTs
*/
case AT_DropColumn: /* change visible to SELECT */
case AT_AddColumnToView: /* CREATE VIEW */
case AT_DropOids: /* calls AT_DropColumn */
case AT_EnableAlwaysRule: /* may change SELECT rules */
case AT_EnableReplicaRule: /* may change SELECT rules */
case AT_EnableRule: /* may change SELECT rules */
case AT_DisableRule: /* may change SELECT rules */
cmd_lockmode = AccessExclusiveLock;
break;
@ -2834,8 +2835,8 @@ AlterTableGetLockLevel(List *cmds)
break;

/*
* These subcommands affect write operations only.
* XXX Theoretically, these could be ShareRowExclusiveLock.
* These subcommands affect write operations only. XXX
* Theoretically, these could be ShareRowExclusiveLock.
*/
case AT_ColumnDefault:
case AT_ProcessedConstraint: /* becomes AT_AddConstraint */
@ -2872,9 +2873,9 @@ AlterTableGetLockLevel(List *cmds)
* Cases essentially the same as CREATE INDEX. We
* could reduce the lock strength to ShareLock if
* we can work out how to allow concurrent catalog
* updates.
* XXX Might be set down to ShareRowExclusiveLock
* but requires further analysis.
* updates. XXX Might be set down to
* ShareRowExclusiveLock but requires further
* analysis.
*/
cmd_lockmode = AccessExclusiveLock;
break;
@ -2883,10 +2884,9 @@ AlterTableGetLockLevel(List *cmds)
/*
* We add triggers to both tables when we add a
* Foreign Key, so the lock level must be at least
* as strong as CREATE TRIGGER.
* XXX Might be set down to ShareRowExclusiveLock
* though trigger info is accessed by
* pg_get_triggerdef
* as strong as CREATE TRIGGER. XXX Might be set
* down to ShareRowExclusiveLock though trigger
* info is accessed by pg_get_triggerdef
*/
cmd_lockmode = AccessExclusiveLock;
break;
@ -2902,8 +2902,8 @@ AlterTableGetLockLevel(List *cmds)
* started before us will continue to see the old inheritance
* behaviour, while queries started after we commit will see
* new behaviour. No need to prevent reads or writes to the
* subtable while we hook it up though.
* Changing the TupDesc may be a problem, so keep highest lock.
* subtable while we hook it up though. Changing the TupDesc
* may be a problem, so keep highest lock.
*/
case AT_AddInherit:
case AT_DropInherit:
@ -2912,9 +2912,9 @@ AlterTableGetLockLevel(List *cmds)

/*
* These subcommands affect implicit row type conversion. They
* have effects similar to CREATE/DROP CAST on queries. We
* don't provide for invalidating parse trees as a result of
* such changes, so we keep these at AccessExclusiveLock.
* have effects similar to CREATE/DROP CAST on queries. We don't
* provide for invalidating parse trees as a result of such
* changes, so we keep these at AccessExclusiveLock.
*/
case AT_AddOf:
case AT_DropOf:
@ -2940,29 +2940,32 @@ AlterTableGetLockLevel(List *cmds)
* updates.
*/
case AT_SetStatistics: /* Uses MVCC in getTableAttrs() */
case AT_ClusterOn: /* Uses MVCC in getIndexes() */
case AT_DropCluster: /* Uses MVCC in getIndexes() */
case AT_SetOptions: /* Uses MVCC in getTableAttrs() */
case AT_ResetOptions: /* Uses MVCC in getTableAttrs() */
cmd_lockmode = ShareUpdateExclusiveLock;
break;

case AT_ValidateConstraint: /* Uses MVCC in getConstraints() */
cmd_lockmode = ShareUpdateExclusiveLock;
break;

/*
* Rel options are more complex than first appears. Options
* are set here for tables, views and indexes; for historical
* reasons these can all be used with ALTER TABLE, so we can't
* decide between them using the basic grammar.
*
* XXX Look in detail at each option to determine lock level,
* e.g. cmd_lockmode = GetRelOptionsLockLevel((List *) cmd->def);
*/
case AT_SetRelOptions: /* Uses MVCC in getIndexes() and getTables() */
case AT_ResetRelOptions: /* Uses MVCC in getIndexes() and getTables() */
cmd_lockmode = AccessExclusiveLock;
break;

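
The structure these hunks belong to is a fold over the subcommand list: every subcommand proposes a lock mode, and the whole ALTER TABLE takes the strongest one proposed. A sketch of that skeleton (case bodies elided):

    LOCKMODE    lockmode = ShareUpdateExclusiveLock;   /* weakest candidate */
    ListCell   *lc;

    foreach(lc, cmds)
    {
        AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lc);
        LOCKMODE    cmd_lockmode = AccessExclusiveLock; /* default to safest */

        switch (cmd->subtype)
        {
                /* ... per-subcommand cases as in the hunks above ... */
            default:
                break;
        }

        /* the whole command takes the strongest lock any subcommand needs */
        if (cmd_lockmode > lockmode)
            lockmode = cmd_lockmode;
    }
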
@ -3209,7 +3212,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
|
||||
cmd->subtype = AT_ValidateConstraintRecurse;
|
||||
pass = AT_PASS_MISC;
|
||||
break;
|
||||
case AT_ReplicaIdentity: /* REPLICA IDENTITY ... */
|
||||
case AT_ReplicaIdentity: /* REPLICA IDENTITY ... */
|
||||
ATSimplePermissions(rel, ATT_TABLE | ATT_MATVIEW);
|
||||
pass = AT_PASS_MISC;
|
||||
/* This command never recurses */
|
||||
@ -3258,7 +3261,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
|
||||
/*
|
||||
* ATRewriteCatalogs
|
||||
*
|
||||
* Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are
|
||||
* Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are
|
||||
* dispatched in a "safe" execution order (designed to avoid unnecessary
|
||||
* conflicts).
|
||||
*/
|
||||
@ -3604,8 +3607,8 @@ ATRewriteTables(List **wqueue, LOCKMODE lockmode)
|
||||
if (RelationIsUsedAsCatalogTable(OldHeap))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("cannot rewrite table \"%s\" used as a catalog table",
|
||||
RelationGetRelationName(OldHeap))));
|
||||
errmsg("cannot rewrite table \"%s\" used as a catalog table",
|
||||
RelationGetRelationName(OldHeap))));
|
||||
|
||||
/*
|
||||
* Don't allow rewrite on temp tables of other backends ... their
|
||||
@ -3856,7 +3859,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
|
||||
{
|
||||
/*
|
||||
* All predicate locks on the tuples or pages are about to be made
|
||||
* invalid, because we move tuples around. Promote them to
|
||||
* invalid, because we move tuples around. Promote them to
|
||||
* relation locks.
|
||||
*/
|
||||
TransferPredicateLocksToHeapRelation(oldrel);
|
||||
@ -3946,8 +3949,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
|
||||
HeapTupleSetOid(tuple, tupOid);
|
||||
|
||||
/*
|
||||
* Constraints might reference the tableoid column, so initialize
|
||||
* t_tableOid before evaluating them.
|
||||
* Constraints might reference the tableoid column, so
|
||||
* initialize t_tableOid before evaluating them.
|
||||
*/
|
||||
tuple->t_tableOid = RelationGetRelid(oldrel);
|
||||
}
|
||||
@@ -4404,7 +4407,7 @@ find_typed_table_dependencies(Oid typeOid, const char *typeName, DropBehavior be
 *
 * Check whether a type is suitable for CREATE TABLE OF/ALTER TABLE OF. If it
 * isn't suitable, throw an error. Currently, we require that the type
-* originated with CREATE TYPE AS. We could support any row type, but doing so
+* originated with CREATE TYPE AS. We could support any row type, but doing so
 * would require handling a number of extra corner cases in the DDL commands.
 */
 void

@@ -4423,7 +4426,7 @@ check_of_type(HeapTuple typetuple)

 /*
 * Close the parent rel, but keep our AccessShareLock on it until xact
-* commit. That will prevent someone else from deleting or ALTERing
+* commit. That will prevent someone else from deleting or ALTERing
 * the type before the typed table creation/conversion commits.
 */
 relation_close(typeRelation, NoLock);

@@ -4882,7 +4885,7 @@ add_column_collation_dependency(Oid relid, int32 attnum, Oid collid)
 /*
 * ALTER TABLE SET WITH OIDS
 *
-* Basically this is an ADD COLUMN for the special OID column. We have
+* Basically this is an ADD COLUMN for the special OID column. We have
 * to cons up a ColumnDef node because the ADD COLUMN code needs one.
 */
 static void

@@ -5352,7 +5355,7 @@ ATExecSetStorage(Relation rel, const char *colName, Node *newValue, LOCKMODE loc
 *
 * DROP COLUMN cannot use the normal ALTER TABLE recursion mechanism,
 * because we have to decide at runtime whether to recurse or not depending
-* on whether attinhcount goes to zero or not. (We can't check this in a
+* on whether attinhcount goes to zero or not. (We can't check this in a
 * static pre-pass because it won't handle multiple inheritance situations
 * correctly.)
 */

@@ -5600,7 +5603,7 @@ ATExecAddIndex(AlteredTableInfo *tab, Relation rel,

 /*
 * If TryReuseIndex() stashed a relfilenode for us, we used it for the new
-* index instead of building from scratch. The DROP of the old edition of
+* index instead of building from scratch. The DROP of the old edition of
 * this index will have scheduled the storage for deletion at commit, so
 * cancel that pending deletion.
 */
@@ -5642,7 +5645,7 @@ ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel,
 elog(ERROR, "index \"%s\" is not unique", indexName);

 /*
-* Determine name to assign to constraint. We require a constraint to
+* Determine name to assign to constraint. We require a constraint to
 * have the same name as the underlying index; therefore, use the index's
 * existing name as the default constraint name, and if the user
 * explicitly gives some other name for the constraint, rename the index

@@ -5851,7 +5854,7 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,

 /*
 * Check if ONLY was specified with ALTER TABLE. If so, allow the
-* contraint creation only if there are no children currently. Error out
+* contraint creation only if there are no children currently. Error out
 * otherwise.
 */
 if (!recurse && children != NIL)

@@ -5883,7 +5886,7 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
 /*
 * Add a foreign-key constraint to a single table
 *
-* Subroutine for ATExecAddConstraint. Must already hold exclusive
+* Subroutine for ATExecAddConstraint. Must already hold exclusive
 * lock on the rel, and have done appropriate validity checks for it.
 * We do permissions checks here, however.
 */

@@ -6022,7 +6025,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
 *
 * Note that we have to be careful about the difference between the actual
 * PK column type and the opclass' declared input type, which might be
-* only binary-compatible with it. The declared opcintype is the right
+* only binary-compatible with it. The declared opcintype is the right
 * thing to probe pg_amop with.
 */
 if (numfks != numpks)

@@ -6179,7 +6182,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,

 /*
 * Upon a change to the cast from the FK column to its pfeqop
-* operand, revalidate the constraint. For this evaluation, a
+* operand, revalidate the constraint. For this evaluation, a
 * binary coercion cast is equivalent to no cast at all. While
 * type implementors should design implicit casts with an eye
 * toward consistency of operations like equality, we cannot

@@ -6197,7 +6200,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
 * Necessarily, the primary key column must then be of the domain
 * type. Since the constraint was previously valid, all values on
 * the foreign side necessarily exist on the primary side and in
-* turn conform to the domain. Consequently, we need not treat
+* turn conform to the domain. Consequently, we need not treat
 * domains specially here.
 *
 * Since we require that all collations share the same notion of

@@ -6207,7 +6210,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
 * We need not directly consider the PK type. It's necessarily
 * binary coercible to the opcintype of the unique index column,
 * and ri_triggers.c will only deal with PK datums in terms of
-* that opcintype. Changing the opcintype also changes pfeqop.
+* that opcintype. Changing the opcintype also changes pfeqop.
 */
 old_check_ok = (new_pathtype == old_pathtype &&
 new_castfunc == old_castfunc &&
@@ -6300,14 +6303,14 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
 */
 static void
 ATExecAlterConstraint(Relation rel, AlterTableCmd *cmd,
-bool recurse, bool recursing, LOCKMODE lockmode)
+bool recurse, bool recursing, LOCKMODE lockmode)
 {
 Relation conrel;
 SysScanDesc scan;
 ScanKeyData key;
 HeapTuple contuple;
 Form_pg_constraint currcon = NULL;
-Constraint *cmdcon = NULL;
+Constraint *cmdcon = NULL;
 bool found = false;

 Assert(IsA(cmd->def, Constraint));

@@ -6374,8 +6377,8 @@ ATExecAlterConstraint(Relation rel, AlterTableCmd *cmd,
 heap_freetuple(copyTuple);

 /*
-* Now we need to update the multiple entries in pg_trigger
-* that implement the constraint.
+* Now we need to update the multiple entries in pg_trigger that
+* implement the constraint.
 */
 tgrel = heap_open(TriggerRelationId, RowExclusiveLock);

@@ -6397,7 +6400,7 @@ ATExecAlterConstraint(Relation rel, AlterTableCmd *cmd,
 CatalogUpdateIndexes(tgrel, copyTuple);

 InvokeObjectPostAlterHook(TriggerRelationId,
-HeapTupleGetOid(tgtuple), 0);
+HeapTupleGetOid(tgtuple), 0);

 heap_freetuple(copyTuple);
 }

@@ -6619,10 +6622,10 @@ transformColumnNameList(Oid relId, List *colList,
 * transformFkeyGetPrimaryKey -
 *
 * Look up the names, attnums, and types of the primary key attributes
-* for the pkrel. Also return the index OID and index opclasses of the
+* for the pkrel. Also return the index OID and index opclasses of the
 * index supporting the primary key.
 *
-* All parameters except pkrel are output parameters. Also, the function
+* All parameters except pkrel are output parameters. Also, the function
 * return value is the number of attributes in the primary key.
 *
 * Used when the column list in the REFERENCES specification is omitted.

@@ -6662,7 +6665,7 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
 if (indexStruct->indisprimary && IndexIsValid(indexStruct))
 {
 /*
-* Refuse to use a deferrable primary key. This is per SQL spec,
+* Refuse to use a deferrable primary key. This is per SQL spec,
 * and there would be a lot of interesting semantic problems if we
 * tried to allow it.
 */
@@ -7592,7 +7595,7 @@ ATPrepAlterColumnType(List **wqueue,
 tab->relkind == RELKIND_FOREIGN_TABLE)
 {
 /*
-* For composite types, do this check now. Tables will check it later
+* For composite types, do this check now. Tables will check it later
 * when the table is being rewritten.
 */
 find_composite_type_dependencies(rel->rd_rel->reltype, rel, NULL);

@@ -7601,7 +7604,7 @@ ATPrepAlterColumnType(List **wqueue,
 ReleaseSysCache(tuple);

 /*
-* The recursion case is handled by ATSimpleRecursion. However, if we are
+* The recursion case is handled by ATSimpleRecursion. However, if we are
 * told not to recurse, there had better not be any child tables; else the
 * alter would put them out of step.
 */

@@ -7710,7 +7713,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
 *
 * We remove any implicit coercion steps at the top level of the old
 * default expression; this has been agreed to satisfy the principle of
-* least surprise. (The conversion to the new column type should act like
+* least surprise. (The conversion to the new column type should act like
 * it started from what the user sees as the stored expression, and the
 * implicit coercions aren't going to be shown.)
 */

@@ -7739,7 +7742,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
 * and record enough information to let us recreate the objects.
 *
 * The actual recreation does not happen here, but only after we have
-* performed all the individual ALTER TYPE operations. We have to save
+* performed all the individual ALTER TYPE operations. We have to save
 * the info before executing ALTER TYPE, though, else the deparser will
 * get confused.
 *

@@ -7868,7 +7871,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
 * used in the trigger's WHEN condition. The first case would
 * not require any extra work, but the second case would
 * require updating the WHEN expression, which will take a
-* significant amount of new code. Since we can't easily tell
+* significant amount of new code. Since we can't easily tell
 * which case applies, we punt for both. FIXME someday.
 */
 ereport(ERROR,
@@ -8144,24 +8147,24 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode)

 /*
 * Re-parse the index and constraint definitions, and attach them to the
-* appropriate work queue entries. We do this before dropping because in
+* appropriate work queue entries. We do this before dropping because in
 * the case of a FOREIGN KEY constraint, we might not yet have exclusive
 * lock on the table the constraint is attached to, and we need to get
 * that before dropping. It's safe because the parser won't actually look
 * at the catalogs to detect the existing entry.
 *
-* We can't rely on the output of deparsing to tell us which relation
-* to operate on, because concurrent activity might have made the name
+* We can't rely on the output of deparsing to tell us which relation to
+* operate on, because concurrent activity might have made the name
 * resolve differently. Instead, we've got to use the OID of the
-* constraint or index we're processing to figure out which relation
-* to operate on.
+* constraint or index we're processing to figure out which relation to
+* operate on.
 */
 forboth(oid_item, tab->changedConstraintOids,
 def_item, tab->changedConstraintDefs)
 {
-Oid oldId = lfirst_oid(oid_item);
-Oid relid;
-Oid confrelid;
+Oid oldId = lfirst_oid(oid_item);
+Oid relid;
+Oid confrelid;

 get_constraint_relation_oids(oldId, &relid, &confrelid);
 ATPostAlterTypeParse(oldId, relid, confrelid,

@@ -8171,8 +8174,8 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode)
 forboth(oid_item, tab->changedIndexOids,
 def_item, tab->changedIndexDefs)
 {
-Oid oldId = lfirst_oid(oid_item);
-Oid relid;
+Oid oldId = lfirst_oid(oid_item);
+Oid relid;

 relid = IndexGetRelation(oldId, false);
 ATPostAlterTypeParse(oldId, relid, InvalidOid,

@@ -8238,9 +8241,9 @@ ATPostAlterTypeParse(Oid oldId, Oid oldRelId, Oid refRelId, char *cmd,
 cmd));
 else if (IsA(stmt, AlterTableStmt))
 querytree_list = list_concat(querytree_list,
-transformAlterTableStmt(oldRelId,
+transformAlterTableStmt(oldRelId,
 (AlterTableStmt *) stmt,
-cmd));
+cmd));
 else
 querytree_list = lappend(querytree_list, stmt);
 }
@@ -8925,13 +8928,13 @@ ATExecSetRelOptions(Relation rel, List *defList, AlterTableType operation,
 if (check_option)
 {
 const char *view_updatable_error =
-view_query_is_auto_updatable(view_query, true);
+view_query_is_auto_updatable(view_query, true);

 if (view_updatable_error)
 ereport(ERROR,
 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-errmsg("WITH CHECK OPTION is supported only on auto-updatable views"),
-errhint("%s", view_updatable_error)));
+errmsg("WITH CHECK OPTION is supported only on auto-updatable views"),
+errhint("%s", view_updatable_error)));
 }
 }

@@ -9098,7 +9101,8 @@ ATExecSetTableSpace(Oid tableOid, Oid newTableSpace, LOCKMODE lockmode)
 /* Fetch the list of indexes on toast relation if necessary */
 if (OidIsValid(reltoastrelid))
 {
-Relation toastRel = relation_open(reltoastrelid, lockmode);
+Relation toastRel = relation_open(reltoastrelid, lockmode);
+
 reltoastidxids = RelationGetIndexList(toastRel);
 relation_close(toastRel, lockmode);
 }

@@ -9120,8 +9124,8 @@ ATExecSetTableSpace(Oid tableOid, Oid newTableSpace, LOCKMODE lockmode)
 FlushRelationBuffers(rel);

 /*
-* Relfilenodes are not unique in databases across tablespaces, so we
-* need to allocate a new one in the new tablespace.
+* Relfilenodes are not unique in databases across tablespaces, so we need
+* to allocate a new one in the new tablespace.
 */
 newrelfilenode = GetNewRelFileNode(newTableSpace, NULL,
 rel->rd_rel->relpersistence);
@@ -9236,9 +9240,9 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst,
 forkNum))));

 /*
-* WAL-log the copied page. Unfortunately we don't know what kind of
-* a page this is, so we have to log the full page including any
-* unused space.
+* WAL-log the copied page. Unfortunately we don't know what kind of a
+* page this is, so we have to log the full page including any unused
+* space.
 */
 if (use_wal)
 log_newpage(&dst->smgr_rnode.node, forkNum, blkno, page, false);

@@ -9246,7 +9250,7 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst,
 PageSetChecksumInplace(page, blkno);

 /*
-* Now write the page. We say isTemp = true even if it's not a temp
+* Now write the page. We say isTemp = true even if it's not a temp
 * rel, because there's no need for smgr to schedule an fsync for this
 * write; we'll do it ourselves below.
 */

@@ -9256,7 +9260,7 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst,
 pfree(buf);

 /*
-* If the rel is WAL-logged, must fsync before commit. We use heap_sync
+* If the rel is WAL-logged, must fsync before commit. We use heap_sync
 * to ensure that the toast table gets fsync'd too. (For a temp or
 * unlogged rel we don't care since the data will be gone after a crash
 * anyway.)

@@ -9431,7 +9435,7 @@ ATExecAddInherit(Relation child_rel, RangeVar *parent, LOCKMODE lockmode)
 MergeConstraintsIntoExisting(child_rel, parent_rel);

 /*
-* OK, it looks valid. Make the catalog entries that show inheritance.
+* OK, it looks valid. Make the catalog entries that show inheritance.
 */
 StoreCatalogInheritance1(RelationGetRelid(child_rel),
 RelationGetRelid(parent_rel),

@@ -9907,7 +9911,7 @@ ATExecDropInherit(Relation rel, RangeVar *parent, LOCKMODE lockmode)
 * Drop the dependency created by StoreCatalogInheritance1 (CREATE TABLE
 * INHERITS/ALTER TABLE INHERIT -- refclassid will be RelationRelationId) or
 * heap_create_with_catalog (CREATE TABLE OF/ALTER TABLE OF -- refclassid will
-* be TypeRelationId). There's no convenient way to do this, so go trawling
+* be TypeRelationId). There's no convenient way to do this, so go trawling
 * through pg_depend.
 */
 static void

@@ -10093,7 +10097,7 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode)
 /*
 * ALTER TABLE NOT OF
 *
-* Detach a typed table from its originating type. Just clear reloftype and
+* Detach a typed table from its originating type. Just clear reloftype and
 * remove the dependency.
 */
 static void
@@ -10155,7 +10159,7 @@ relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid,
 */
 pg_class = heap_open(RelationRelationId, RowExclusiveLock);
 pg_class_tuple = SearchSysCacheCopy1(RELOID,
-ObjectIdGetDatum(RelationGetRelid(rel)));
+ObjectIdGetDatum(RelationGetRelid(rel)));
 if (!HeapTupleIsValid(pg_class_tuple))
 elog(ERROR, "cache lookup failed for relation \"%s\"",
 RelationGetRelationName(rel));

@@ -10191,8 +10195,8 @@ relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid,
 }

 /*
-* Clear the indisreplident flag from any index that had it previously, and
-* set it for any index that should have it now.
+* Clear the indisreplident flag from any index that had it previously,
+* and set it for any index that should have it now.
 */
 pg_index = heap_open(IndexRelationId, RowExclusiveLock);
 foreach(index, RelationGetIndexList(rel))

@@ -10201,7 +10205,7 @@ relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid,
 bool dirty = false;

 pg_index_tuple = SearchSysCacheCopy1(INDEXRELID,
-ObjectIdGetDatum(thisIndexOid));
+ObjectIdGetDatum(thisIndexOid));
 if (!HeapTupleIsValid(pg_index_tuple))
 elog(ERROR, "cache lookup failed for index %u", thisIndexOid);
 pg_index_form = (Form_pg_index) GETSTRUCT(pg_index_tuple);

@@ -10261,7 +10265,7 @@ ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode
 }
 else if (stmt->identity_type == REPLICA_IDENTITY_INDEX)
 {
-/* fallthrough */;
+/* fallthrough */ ;
 }
 else
 elog(ERROR, "unexpected identity type %u", stmt->identity_type);

@@ -10289,20 +10293,20 @@ ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode
 if (!indexRel->rd_am->amcanunique || !indexRel->rd_index->indisunique)
 ereport(ERROR,
 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-errmsg("cannot use non-unique index \"%s\" as replica identity",
-RelationGetRelationName(indexRel))));
+errmsg("cannot use non-unique index \"%s\" as replica identity",
+RelationGetRelationName(indexRel))));
 /* Deferred indexes are not guaranteed to be always unique. */
 if (!indexRel->rd_index->indimmediate)
 ereport(ERROR,
 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-errmsg("cannot use non-immediate index \"%s\" as replica identity",
-RelationGetRelationName(indexRel))));
+errmsg("cannot use non-immediate index \"%s\" as replica identity",
+RelationGetRelationName(indexRel))));
 /* Expression indexes aren't supported. */
 if (RelationGetIndexExpressions(indexRel) != NIL)
 ereport(ERROR,
 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-errmsg("cannot use expression index \"%s\" as replica identity",
-RelationGetRelationName(indexRel))));
+errmsg("cannot use expression index \"%s\" as replica identity",
+RelationGetRelationName(indexRel))));
 /* Predicate indexes aren't supported. */
 if (RelationGetIndexPredicate(indexRel) != NIL)
 ereport(ERROR,

@@ -10319,7 +10323,7 @@ ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode
 /* Check index for nullable columns. */
 for (key = 0; key < indexRel->rd_index->indnatts; key++)
 {
-int16 attno = indexRel->rd_index->indkey.values[key];
+int16 attno = indexRel->rd_index->indkey.values[key];
 Form_pg_attribute attr;

 /* Of the system columns, only oid is indexable. */
@@ -10878,7 +10882,7 @@ AtEOXact_on_commit_actions(bool isCommit)
 * Post-subcommit or post-subabort cleanup for ON COMMIT management.
 *
 * During subabort, we can immediately remove entries created during this
-* subtransaction. During subcommit, just relabel entries marked during
+* subtransaction. During subcommit, just relabel entries marked during
 * this subtransaction as being the parent's responsibility.
 */
 void

@@ -10922,7 +10926,7 @@ AtEOSubXact_on_commit_actions(bool isCommit, SubTransactionId mySubid,
 * This is intended as a callback for RangeVarGetRelidExtended(). It allows
 * the relation to be locked only if (1) it's a plain table, materialized
 * view, or TOAST table and (2) the current user is the owner (or the
-* superuser). This meets the permission-checking needs of CLUSTER, REINDEX
+* superuser). This meets the permission-checking needs of CLUSTER, REINDEX
 * TABLE, and REFRESH MATERIALIZED VIEW; we expose it here so that it can be
 * used by all.
 */

@@ -10939,7 +10943,7 @@ RangeVarCallbackOwnsTable(const RangeVar *relation,
 /*
 * If the relation does exist, check whether it's an index. But note that
 * the relation might have been dropped between the time we did the name
-* lookup and now. In that case, there's nothing to do.
+* lookup and now. In that case, there's nothing to do.
 */
 relkind = get_rel_relkind(relId);
 if (!relkind)

@@ -11105,8 +11109,8 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid,
 relkind != RELKIND_FOREIGN_TABLE)
 ereport(ERROR,
 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-errmsg("\"%s\" is not a table, view, materialized view, sequence, or foreign table",
-rv->relname)));
+errmsg("\"%s\" is not a table, view, materialized view, sequence, or foreign table",
+rv->relname)));

 ReleaseSysCache(tuple);
 }
@@ -31,7 +31,7 @@
 * To allow CREATE DATABASE to give a new database a default tablespace
 * that's different from the template database's default, we make the
 * provision that a zero in pg_class.reltablespace means the database's
-* default tablespace. Without this, CREATE DATABASE would have to go in
+* default tablespace. Without this, CREATE DATABASE would have to go in
 * and munge the system catalogs of the new database.
 *
 *

@@ -281,7 +281,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
 * reference the whole path here, but mkdir() uses the first two parts.
 */
 if (strlen(location) + 1 + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 +
-OIDCHARS + 1 + OIDCHARS + 1 + FORKNAMECHARS + 1 + OIDCHARS > MAXPGPATH)
+OIDCHARS + 1 + OIDCHARS + 1 + FORKNAMECHARS + 1 + OIDCHARS > MAXPGPATH)
 ereport(ERROR,
 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
 errmsg("tablespace location \"%s\" is too long",

@@ -488,7 +488,7 @@ DropTableSpace(DropTableSpaceStmt *stmt)
 * Not all files deleted? However, there can be lingering empty files
 * in the directories, left behind by for example DROP TABLE, that
 * have been scheduled for deletion at next checkpoint (see comments
-* in mdunlink() for details). We could just delete them immediately,
+* in mdunlink() for details). We could just delete them immediately,
 * but we can't tell them apart from important data files that we
 * mustn't delete. So instead, we force a checkpoint which will clean
 * out any lingering files, and try again.

@@ -562,10 +562,10 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid)

 linkloc = psprintf("pg_tblspc/%u", tablespaceoid);
 location_with_version_dir = psprintf("%s/%s", location,
-TABLESPACE_VERSION_DIRECTORY);
+TABLESPACE_VERSION_DIRECTORY);

 /*
-* Attempt to coerce target directory to safe permissions. If this fails,
+* Attempt to coerce target directory to safe permissions. If this fails,
 * it doesn't exist or has the wrong owner.
 */
 if (chmod(location, S_IRWXU) != 0)

@@ -666,7 +666,7 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid)
 * Attempt to remove filesystem infrastructure for the tablespace.
 *
 * 'redo' indicates we are redoing a drop from XLOG; in that case we should
-* not throw an ERROR for problems, just LOG them. The worst consequence of
+* not throw an ERROR for problems, just LOG them. The worst consequence of
 * not removing files here would be failure to release some disk space, which
 * does not justify throwing an error that would require manual intervention
 * to get the database running again.

@@ -684,7 +684,7 @@ destroy_tablespace_directories(Oid tablespaceoid, bool redo)
 struct stat st;

 linkloc_with_version_dir = psprintf("pg_tblspc/%u/%s", tablespaceoid,
-TABLESPACE_VERSION_DIRECTORY);
+TABLESPACE_VERSION_DIRECTORY);

 /*
 * Check if the tablespace still contains any files. We try to rmdir each

@@ -701,10 +701,10 @@ destroy_tablespace_directories(Oid tablespaceoid, bool redo)
 *
 * If redo is true then ENOENT is a likely outcome here, and we allow it
 * to pass without comment. In normal operation we still allow it, but
-* with a warning. This is because even though ProcessUtility disallows
+* with a warning. This is because even though ProcessUtility disallows
 * DROP TABLESPACE in a transaction block, it's possible that a previous
 * DROP failed and rolled back after removing the tablespace directories
-* and/or symlink. We want to allow a new DROP attempt to succeed at
+* and/or symlink. We want to allow a new DROP attempt to succeed at
 * removing the catalog entries (and symlink if still present), so we
 * should not give a hard error here.
 */

@@ -1119,8 +1119,8 @@ AlterTableSpaceMove(AlterTableSpaceMoveStmt *stmt)

 /*
 * Handle permissions-checking here since we are locking the tables
-* and also to avoid doing a bunch of work only to fail part-way.
-* Note that permissions will also be checked by AlterTableInternal().
+* and also to avoid doing a bunch of work only to fail part-way. Note
+* that permissions will also be checked by AlterTableInternal().
 *
 * Caller must be considered an owner on the table to move it.
 */

@@ -1179,7 +1179,7 @@ check_default_tablespace(char **newval, void **extra, GucSource source)
 {
 /*
 * If we aren't inside a transaction, we cannot do database access so
-* cannot verify the name. Must accept the value on faith.
+* cannot verify the name. Must accept the value on faith.
 */
 if (IsTransactionState())
 {

@@ -1290,7 +1290,7 @@ check_temp_tablespaces(char **newval, void **extra, GucSource source)

 /*
 * If we aren't inside a transaction, we cannot do database access so
-* cannot verify the individual names. Must accept the list on faith.
+* cannot verify the individual names. Must accept the list on faith.
 * Fortunately, there's then also no need to pass the data to fd.c.
 */
 if (IsTransactionState())
@@ -107,7 +107,7 @@ static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
 *
 * constraintOid, if nonzero, says that this trigger is being created
 * internally to implement that constraint. A suitable pg_depend entry will
-* be made to link the trigger to that constraint. constraintOid is zero when
+* be made to link the trigger to that constraint. constraintOid is zero when
 * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
 * TRIGGER, we build a pg_constraint entry internally.)
 *

@@ -418,7 +418,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 if (funcrettype != TRIGGEROID)
 {
 /*
-* We allow OPAQUE just so we can load old dump files. When we see a
+* We allow OPAQUE just so we can load old dump files. When we see a
 * trigger function declared OPAQUE, change it to TRIGGER.
 */
 if (funcrettype == OPAQUEOID)

@@ -440,7 +440,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 * references one of the built-in RI_FKey trigger functions, assume it is
 * from a dump of a pre-7.3 foreign key constraint, and take steps to
 * convert this legacy representation into a regular foreign key
-* constraint. Ugly, but necessary for loading old dump files.
+* constraint. Ugly, but necessary for loading old dump files.
 */
 if (stmt->isconstraint && !isInternal &&
 list_length(stmt->args) >= 6 &&

@@ -503,7 +503,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,

 /*
 * If trigger is internally generated, modify the provided trigger name to
-* ensure uniqueness by appending the trigger OID. (Callers will usually
+* ensure uniqueness by appending the trigger OID. (Callers will usually
 * supply a simple constant trigger name in these cases.)
 */
 if (isInternal)

@@ -627,7 +627,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 int16 attnum;
 int j;

-/* Lookup column name. System columns are not allowed */
+/* Lookup column name. System columns are not allowed */
 attnum = attnameAttNum(rel, name, false);
 if (attnum == InvalidAttrNumber)
 ereport(ERROR,

@@ -732,7 +732,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 else
 {
 /*
-* User CREATE TRIGGER, so place dependencies. We make trigger be
+* User CREATE TRIGGER, so place dependencies. We make trigger be
 * auto-dropped if its relation is dropped or if the FK relation is
 * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
 */

@@ -801,7 +801,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 * full-fledged foreign key constraints.
 *
 * The conversion is complex because a pre-7.3 foreign key involved three
-* separate triggers, which were reported separately in dumps. While the
+* separate triggers, which were reported separately in dumps. While the
 * single trigger on the referencing table adds no new information, we need
 * to know the trigger functions of both of the triggers on the referenced
 * table to build the constraint declaration. Also, due to lack of proper
@@ -2038,7 +2038,7 @@ ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
 if (newtuple != slottuple)
 {
 /*
-* Return the modified tuple using the es_trig_tuple_slot. We assume
+* Return the modified tuple using the es_trig_tuple_slot. We assume
 * the tuple was allocated in per-tuple memory context, and therefore
 * will go away by itself. The tuple table slot should not try to
 * clear it.

@@ -2113,7 +2113,7 @@ ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
 if (newtuple != slottuple)
 {
 /*
-* Return the modified tuple using the es_trig_tuple_slot. We assume
+* Return the modified tuple using the es_trig_tuple_slot. We assume
 * the tuple was allocated in per-tuple memory context, and therefore
 * will go away by itself. The tuple table slot should not try to
 * clear it.

@@ -2503,7 +2503,7 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
 if (newtuple != slottuple)
 {
 /*
-* Return the modified tuple using the es_trig_tuple_slot. We assume
+* Return the modified tuple using the es_trig_tuple_slot. We assume
 * the tuple was allocated in per-tuple memory context, and therefore
 * will go away by itself. The tuple table slot should not try to
 * clear it.

@@ -2599,7 +2599,7 @@ ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
 if (newtuple != slottuple)
 {
 /*
-* Return the modified tuple using the es_trig_tuple_slot. We assume
+* Return the modified tuple using the es_trig_tuple_slot. We assume
 * the tuple was allocated in per-tuple memory context, and therefore
 * will go away by itself. The tuple table slot should not try to
 * clear it.
@@ -3031,7 +3031,7 @@ typedef SetConstraintStateData *SetConstraintState;
 * Although this is mutable state, we can keep it in AfterTriggerSharedData
 * because all instances of the same type of event in a given event list will
 * be fired at the same time, if they were queued between the same firing
-* cycles. So we need only ensure that ats_firing_id is zero when attaching
+* cycles. So we need only ensure that ats_firing_id is zero when attaching
 * a new event to an existing AfterTriggerSharedData record.
 */
 typedef uint32 TriggerFlags;

@@ -3077,7 +3077,7 @@ typedef struct AfterTriggerEventDataOneCtid
 typedef struct AfterTriggerEventDataZeroCtids
 {
 TriggerFlags ate_flags; /* status bits and offset to shared data */
-} AfterTriggerEventDataZeroCtids;
+} AfterTriggerEventDataZeroCtids;

 #define SizeofTriggerEvent(evt) \
 (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \

@@ -3092,7 +3092,7 @@ typedef struct AfterTriggerEventDataZeroCtids
 /*
 * To avoid palloc overhead, we keep trigger events in arrays in successively-
 * larger chunks (a slightly more sophisticated version of an expansible
-* array). The space between CHUNK_DATA_START and freeptr is occupied by
+* array). The space between CHUNK_DATA_START and freeptr is occupied by
 * AfterTriggerEventData records; the space between endfree and endptr is
 * occupied by AfterTriggerSharedData records.
 */

@@ -3134,7 +3134,7 @@ typedef struct AfterTriggerEventList
 *
 * firing_counter is incremented for each call of afterTriggerInvokeEvents.
 * We mark firable events with the current firing cycle's ID so that we can
-* tell which ones to work on. This ensures sane behavior if a trigger
+* tell which ones to work on. This ensures sane behavior if a trigger
 * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
 * only fire those events that weren't already scheduled for firing.
 *

@@ -3142,7 +3142,7 @@ typedef struct AfterTriggerEventList
 * This is saved and restored across failed subtransactions.
 *
 * events is the current list of deferred events. This is global across
-* all subtransactions of the current transaction. In a subtransaction
+* all subtransactions of the current transaction. In a subtransaction
 * abort, we know that the events added by the subtransaction are at the
 * end of the list, so it is relatively easy to discard them. The event
 * list chunks themselves are stored in event_cxt.

@@ -3174,12 +3174,12 @@ typedef struct AfterTriggerEventList
 * which we similarly use to clean up at subtransaction abort.
 *
 * firing_stack is a stack of copies of subtransaction-start-time
-* firing_counter. We use this to recognize which deferred triggers were
+* firing_counter. We use this to recognize which deferred triggers were
 * fired (or marked for firing) within an aborted subtransaction.
 *
 * We use GetCurrentTransactionNestLevel() to determine the correct array
 * index in these stacks. maxtransdepth is the number of allocated entries in
-* each stack. (By not keeping our own stack pointer, we can avoid trouble
+* each stack. (By not keeping our own stack pointer, we can avoid trouble
 * in cases where errors during subxact abort cause multiple invocations
 * of AfterTriggerEndSubXact() at the same nesting depth.)
 */
@@ -3490,7 +3490,7 @@ afterTriggerRestoreEventList(AfterTriggerEventList *events,
 * single trigger function.
 *
 * Frequently, this will be fired many times in a row for triggers of
-* a single relation. Therefore, we cache the open relation and provide
+* a single relation. Therefore, we cache the open relation and provide
 * fmgr lookup cache space at the caller level. (For triggers fired at
 * the end of a query, we can even piggyback on the executor's state.)
 *

@@ -3566,6 +3566,7 @@ AfterTriggerExecute(AfterTriggerEvent event,
 }
 /* fall through */
 case AFTER_TRIGGER_FDW_REUSE:
+
 /*
 * Using ExecMaterializeSlot() rather than ExecFetchSlotTuple()
 * ensures that tg_trigtuple does not reference tuplestore memory.

@@ -4093,7 +4094,7 @@ AfterTriggerFireDeferred(void)
 }

 /*
-* Run all the remaining triggers. Loop until they are all gone, in case
+* Run all the remaining triggers. Loop until they are all gone, in case
 * some trigger queues more for us to do.
 */
 while (afterTriggerMarkEvents(events, NULL, false))

@@ -4156,7 +4157,7 @@ AfterTriggerBeginSubXact(void)
 int my_level = GetCurrentTransactionNestLevel();

 /*
-* Ignore call if the transaction is in aborted state. (Probably
+* Ignore call if the transaction is in aborted state. (Probably
 * shouldn't happen?)
 */
 if (afterTriggers == NULL)

@@ -4235,7 +4236,7 @@ AfterTriggerEndSubXact(bool isCommit)
 CommandId subxact_firing_id;

 /*
-* Ignore call if the transaction is in aborted state. (Probably
+* Ignore call if the transaction is in aborted state. (Probably
 * unneeded)
 */
 if (afterTriggers == NULL)
|
||||
}
|
||||
|
||||
/*
|
||||
* Add a per-trigger item to a SetConstraintState. Returns possibly-changed
|
||||
* Add a per-trigger item to a SetConstraintState. Returns possibly-changed
|
||||
* pointer to the state object (it will change if we have to repalloc).
|
||||
*/
|
||||
static SetConstraintState
|
||||
@ -4463,7 +4464,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
|
||||
* First, identify all the named constraints and make a list of their
|
||||
* OIDs. Since, unlike the SQL spec, we allow multiple constraints of
|
||||
* the same name within a schema, the specifications are not
|
||||
* necessarily unique. Our strategy is to target all matching
|
||||
* necessarily unique. Our strategy is to target all matching
|
||||
* constraints within the first search-path schema that has any
|
||||
* matches, but disregard matches in schemas beyond the first match.
|
||||
* (This is a bit odd but it's the historical behavior.)
|
||||
@ -4489,7 +4490,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
|
||||
|
||||
/*
|
||||
* If we're given the schema name with the constraint, look only
|
||||
* in that schema. If given a bare constraint name, use the
|
||||
* in that schema. If given a bare constraint name, use the
|
||||
* search path to find the first matching constraint.
|
||||
*/
|
||||
if (constraint->schemaname)
|
||||
@ -4593,7 +4594,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
|
||||
|
||||
/*
|
||||
* Silently skip triggers that are marked as non-deferrable in
|
||||
* pg_trigger. This is not an error condition, since a
|
||||
* pg_trigger. This is not an error condition, since a
|
||||
* deferrable RI constraint may have some non-deferrable
|
||||
* actions.
|
||||
*/
|
||||
@ -4664,7 +4665,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
|
||||
|
||||
/*
|
||||
* Make sure a snapshot has been established in case trigger
|
||||
* functions need one. Note that we avoid setting a snapshot if
|
||||
* functions need one. Note that we avoid setting a snapshot if
|
||||
* we don't find at least one trigger that has to be fired now.
|
||||
* This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
|
||||
* ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are
|
||||
@ -4724,7 +4725,7 @@ AfterTriggerPendingOnRel(Oid relid)
|
||||
AfterTriggerShared evtshared = GetTriggerSharedData(event);
|
||||
|
||||
/*
|
||||
* We can ignore completed events. (Even if a DONE flag is rolled
|
||||
* We can ignore completed events. (Even if a DONE flag is rolled
|
||||
* back by subxact abort, it's OK because the effects of the TRUNCATE
|
||||
* or whatever must get rolled back too.)
|
||||
*/
|
||||
@ -4765,7 +4766,7 @@ AfterTriggerPendingOnRel(Oid relid)
|
||||
* be fired for an event.
|
||||
*
|
||||
* NOTE: this is called whenever there are any triggers associated with
|
||||
* the event (even if they are disabled). This function decides which
|
||||
* the event (even if they are disabled). This function decides which
|
||||
* triggers actually need to be queued.
|
||||
* ----------
|
||||
*/
|
||||
|
@ -514,8 +514,8 @@ DefineType(List *names, List *parameters)
|
||||
analyzeOid = findTypeAnalyzeFunction(analyzeName, typoid);
|
||||
|
||||
/*
|
||||
* Check permissions on functions. We choose to require the creator/owner
|
||||
* of a type to also own the underlying functions. Since creating a type
|
||||
* Check permissions on functions. We choose to require the creator/owner
|
||||
* of a type to also own the underlying functions. Since creating a type
|
||||
* is tantamount to granting public execute access on the functions, the
|
||||
* minimum sane check would be for execute-with-grant-option. But we
|
||||
* don't have a way to make the type go away if the grant option is
|
||||
@ -552,7 +552,7 @@ DefineType(List *names, List *parameters)
|
||||
* now have TypeCreate do all the real work.
|
||||
*
|
||||
* Note: the pg_type.oid is stored in user tables as array elements (base
|
||||
* types) in ArrayType and in composite types in DatumTupleFields. This
|
||||
* types) in ArrayType and in composite types in DatumTupleFields. This
|
||||
* oid must be preserved by binary upgrades.
|
||||
*/
|
||||
typoid =
|
||||
@ -725,7 +725,7 @@ DefineDomain(CreateDomainStmt *stmt)
|
||||
get_namespace_name(domainNamespace));
|
||||
|
||||
/*
|
||||
* Check for collision with an existing type name. If there is one and
|
||||
* Check for collision with an existing type name. If there is one and
|
||||
* it's an autogenerated array, we can rename it out of the way.
|
||||
*/
|
||||
old_type_oid = GetSysCacheOid2(TYPENAMENSP,
|
||||
@ -1076,7 +1076,7 @@ DefineEnum(CreateEnumStmt *stmt)
|
||||
get_namespace_name(enumNamespace));
|
||||
|
||||
/*
|
||||
* Check for collision with an existing type name. If there is one and
|
||||
* Check for collision with an existing type name. If there is one and
|
||||
* it's an autogenerated array, we can rename it out of the way.
|
||||
*/
|
||||
old_type_oid = GetSysCacheOid2(TYPENAMENSP,
|
||||
@ -1193,7 +1193,7 @@ AlterEnum(AlterEnumStmt *stmt, bool isTopLevel)
|
||||
/*
|
||||
* Ordinarily we disallow adding values within transaction blocks, because
|
||||
* we can't cope with enum OID values getting into indexes and then having
|
||||
* their defining pg_enum entries go away. However, it's okay if the enum
|
||||
* their defining pg_enum entries go away. However, it's okay if the enum
|
||||
* type was created in the current transaction, since then there can be no
|
||||
* such indexes that wouldn't themselves go away on rollback. (We support
|
||||
* this case because pg_dump --binary-upgrade needs it.) We test this by
|
||||
@ -1515,7 +1515,7 @@ DefineRange(CreateRangeStmt *stmt)
|
||||
* impossible to define a polymorphic constructor; we have to generate new
|
||||
* constructor functions explicitly for each range type.
|
||||
*
|
||||
* We actually define 4 functions, with 0 through 3 arguments. This is just
|
||||
* We actually define 4 functions, with 0 through 3 arguments. This is just
|
||||
* to offer more convenience for the user.
|
||||
*/
|
||||
static void
|
||||
@ -2277,7 +2277,7 @@ AlterDomainNotNull(List *names, bool notNull)
|
||||
/*
|
||||
* In principle the auxiliary information for this
|
||||
* error should be errdatatype(), but errtablecol()
|
||||
* seems considerably more useful in practice. Since
|
||||
* seems considerably more useful in practice. Since
|
||||
* this code only executes in an ALTER DOMAIN command,
|
||||
* the client should already know which domain is in
|
||||
* question.
|
||||
@ -2300,7 +2300,7 @@ AlterDomainNotNull(List *names, bool notNull)
|
||||
}
|
||||
|
||||
/*
|
||||
* Okay to update pg_type row. We can scribble on typTup because it's a
|
||||
* Okay to update pg_type row. We can scribble on typTup because it's a
|
||||
* copy.
|
||||
*/
|
||||
typTup->typnotnull = notNull;
|
||||
@ -2488,7 +2488,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
|
||||
|
||||
/*
|
||||
* Since all other constraint types throw errors, this must be a check
|
||||
* constraint. First, process the constraint expression and add an entry
|
||||
* constraint. First, process the constraint expression and add an entry
|
||||
* to pg_constraint.
|
||||
*/
|
||||
|
||||
@ -2674,7 +2674,7 @@ validateDomainConstraint(Oid domainoid, char *ccbin)
|
||||
/*
|
||||
* In principle the auxiliary information for this error
|
||||
* should be errdomainconstraint(), but errtablecol()
|
||||
* seems considerably more useful in practice. Since this
|
||||
* seems considerably more useful in practice. Since this
|
||||
* code only executes in an ALTER DOMAIN command, the
|
||||
* client should already know which domain is in question,
|
||||
* and which constraint too.
|
||||
@ -2857,7 +2857,7 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* Okay, add column to result. We store the columns in column-number
|
||||
* Okay, add column to result. We store the columns in column-number
|
||||
* order; this is just a hack to improve predictability of regression
|
||||
* test output ...
|
||||
*/
|
||||
@ -2944,7 +2944,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
|
||||
|
||||
/*
|
||||
* Set up a CoerceToDomainValue to represent the occurrence of VALUE in
|
||||
* the expression. Note that it will appear to have the type of the base
|
||||
* the expression. Note that it will appear to have the type of the base
|
||||
* type, not the domain. This seems correct since within the check
|
||||
* expression, we should not assume the input value can be considered a
|
||||
* member of the domain.
|
||||
@ -3317,7 +3317,7 @@ AlterTypeOwner(List *names, Oid newOwnerId, ObjectType objecttype)
|
||||
|
||||
/*
|
||||
* If it's a composite type, invoke ATExecChangeOwner so that we fix
|
||||
* up the pg_class entry properly. That will call back to
|
||||
* up the pg_class entry properly. That will call back to
|
||||
* AlterTypeOwnerInternal to take care of the pg_type entry(s).
|
||||
*/
|
||||
if (typTup->typtype == TYPTYPE_COMPOSITE)
|
||||
@ -3464,7 +3464,7 @@ AlterTypeNamespace_oid(Oid typeOid, Oid nspOid, ObjectAddresses *objsMoved)
|
||||
* Caller must have already checked privileges.
|
||||
*
|
||||
* The function automatically recurses to process the type's array type,
|
||||
* if any. isImplicitArray should be TRUE only when doing this internal
|
||||
* if any. isImplicitArray should be TRUE only when doing this internal
|
||||
* recursion (outside callers must never try to move an array type directly).
|
||||
*
|
||||
* If errorOnTableType is TRUE, the function errors out if the type is
|
||||
|
@ -995,7 +995,7 @@ DropRole(DropRoleStmt *stmt)
|
||||
ReleaseSysCache(tuple);
|
||||
|
||||
/*
|
||||
* Remove role from the pg_auth_members table. We have to remove all
|
||||
* Remove role from the pg_auth_members table. We have to remove all
|
||||
* tuples that show it as either a role or a member.
|
||||
*
|
||||
* XXX what about grantor entries? Maybe we should do one heap scan.
|
||||
@ -1091,7 +1091,7 @@ RenameRole(const char *oldname, const char *newname)
|
||||
* XXX Client applications probably store the session user somewhere, so
|
||||
* renaming it could cause confusion. On the other hand, there may not be
|
||||
* an actual problem besides a little confusion, so think about this and
|
||||
* decide. Same for SET ROLE ... we don't restrict renaming the current
|
||||
* decide. Same for SET ROLE ... we don't restrict renaming the current
|
||||
* effective userid, though.
|
||||
*/
|
||||
|
||||
@ -1347,7 +1347,7 @@ AddRoleMems(const char *rolename, Oid roleid,
|
||||
|
||||
/*
|
||||
* Check permissions: must have createrole or admin option on the role to
|
||||
* be changed. To mess with a superuser role, you gotta be superuser.
|
||||
* be changed. To mess with a superuser role, you gotta be superuser.
|
||||
*/
|
||||
if (superuser_arg(roleid))
|
||||
{
|
||||
@ -1493,7 +1493,7 @@ DelRoleMems(const char *rolename, Oid roleid,
|
||||
|
||||
/*
|
||||
* Check permissions: must have createrole or admin option on the role to
|
||||
* be changed. To mess with a superuser role, you gotta be superuser.
|
||||
* be changed. To mess with a superuser role, you gotta be superuser.
|
||||
*/
|
||||
if (superuser_arg(roleid))
|
||||
{
|
||||
|
@ -381,18 +381,18 @@ get_rel_oids(Oid relid, const RangeVar *vacrel)
|
||||
*
|
||||
* The output parameters are:
|
||||
* - oldestXmin is the cutoff value used to distinguish whether tuples are
|
||||
* DEAD or RECENTLY_DEAD (see HeapTupleSatisfiesVacuum).
|
||||
* DEAD or RECENTLY_DEAD (see HeapTupleSatisfiesVacuum).
|
||||
* - freezeLimit is the Xid below which all Xids are replaced by
|
||||
* FrozenTransactionId during vacuum.
|
||||
* FrozenTransactionId during vacuum.
|
||||
* - xidFullScanLimit (computed from table_freeze_age parameter)
|
||||
* represents a minimum Xid value; a table whose relfrozenxid is older than
|
||||
* this will have a full-table vacuum applied to it, to freeze tuples across
|
||||
* the whole table. Vacuuming a table younger than this value can use a
|
||||
* partial scan.
|
||||
* represents a minimum Xid value; a table whose relfrozenxid is older than
|
||||
* this will have a full-table vacuum applied to it, to freeze tuples across
|
||||
* the whole table. Vacuuming a table younger than this value can use a
|
||||
* partial scan.
|
||||
* - multiXactCutoff is the value below which all MultiXactIds are removed from
|
||||
* Xmax.
|
||||
* Xmax.
|
||||
* - mxactFullScanLimit is a value against which a table's relminmxid value is
|
||||
* compared to produce a full-table vacuum, as with xidFullScanLimit.
|
||||
* compared to produce a full-table vacuum, as with xidFullScanLimit.
|
||||
*
|
||||
* xidFullScanLimit and mxactFullScanLimit can be passed as NULL if caller is
|
||||
* not interested.
|
||||
@ -417,9 +417,9 @@ vacuum_set_xid_limits(Relation rel,
|
||||
MultiXactId safeMxactLimit;
|
||||
|
||||
/*
|
||||
* We can always ignore processes running lazy vacuum. This is because we
|
||||
* We can always ignore processes running lazy vacuum. This is because we
|
||||
* use these values only for deciding which tuples we must keep in the
|
||||
* tables. Since lazy vacuum doesn't write its XID anywhere, it's safe to
|
||||
* tables. Since lazy vacuum doesn't write its XID anywhere, it's safe to
|
||||
* ignore it. In theory it could be problematic to ignore lazy vacuums in
|
||||
* a full vacuum, but keep in mind that only one vacuum process can be
|
||||
* working on a particular table at any time, and that each vacuum is
|
||||
@ -566,7 +566,7 @@ vacuum_set_xid_limits(Relation rel,
|
||||
* If we scanned the whole relation then we should just use the count of
|
||||
* live tuples seen; but if we did not, we should not trust the count
|
||||
* unreservedly, especially not in VACUUM, which may have scanned a quite
|
||||
* nonrandom subset of the table. When we have only partial information,
|
||||
* nonrandom subset of the table. When we have only partial information,
|
||||
* we take the old value of pg_class.reltuples as a measurement of the
|
||||
* tuple density in the unscanned pages.
|
||||
*
|
||||
@ -712,7 +712,7 @@ vac_update_relstats(Relation relation,
|
||||
|
||||
/*
|
||||
* If we have discovered that there are no indexes, then there's no
|
||||
* primary key either. This could be done more thoroughly...
|
||||
* primary key either. This could be done more thoroughly...
|
||||
*/
|
||||
if (pgcform->relhaspkey && !hasindex)
|
||||
{
|
||||
@ -772,7 +772,7 @@ vac_update_relstats(Relation relation,
|
||||
* truncate pg_clog and pg_multixact.
|
||||
*
|
||||
* We violate transaction semantics here by overwriting the database's
|
||||
* existing pg_database tuple with the new value. This is reasonably
|
||||
* existing pg_database tuple with the new value. This is reasonably
|
||||
* safe since the new value is correct whether or not this transaction
|
||||
* commits. As with vac_update_relstats, this avoids leaving dead tuples
|
||||
* behind after a VACUUM.
|
||||
@ -892,7 +892,7 @@ vac_update_datfrozenxid(void)
|
||||
* Also update the XID wrap limit info maintained by varsup.c.
|
||||
*
|
||||
* The passed XID is simply the one I just wrote into my pg_database
|
||||
* entry. It's used to initialize the "min" calculation.
|
||||
* entry. It's used to initialize the "min" calculation.
|
||||
*
|
||||
* This routine is only invoked when we've managed to change our
|
||||
* DB's datfrozenxid entry, or we found that the shared XID-wrap-limit
|
||||
@ -976,7 +976,7 @@ vac_truncate_clog(TransactionId frozenXID, MultiXactId minMulti)
|
||||
/*
|
||||
* Update the wrap limit for GetNewTransactionId and creation of new
|
||||
* MultiXactIds. Note: these functions will also signal the postmaster
|
||||
* for an(other) autovac cycle if needed. XXX should we avoid possibly
|
||||
* for an(other) autovac cycle if needed. XXX should we avoid possibly
|
||||
* signalling twice?
|
||||
*/
|
||||
SetTransactionIdLimit(frozenXID, oldestxid_datoid);
|
||||
@ -988,7 +988,7 @@ vac_truncate_clog(TransactionId frozenXID, MultiXactId minMulti)
|
||||
* vacuum_rel() -- vacuum one heap relation
|
||||
*
|
||||
* Doing one heap at a time incurs extra overhead, since we need to
|
||||
* check that the heap exists again just before we vacuum it. The
|
||||
* check that the heap exists again just before we vacuum it. The
|
||||
* reason that we do this is so that vacuuming can be spread across
|
||||
* many small transactions. Otherwise, two-phase locking would require
|
||||
* us to lock the entire database during one pass of the vacuum cleaner.
|
||||
@ -1045,7 +1045,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
|
||||
}
|
||||
|
||||
/*
|
||||
* Check for user-requested abort. Note we want this to be inside a
|
||||
* Check for user-requested abort. Note we want this to be inside a
|
||||
* transaction, so xact.c doesn't issue useless WARNING.
|
||||
*/
|
||||
CHECK_FOR_INTERRUPTS();
|
||||
@ -1092,7 +1092,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
|
||||
*
|
||||
* We allow the user to vacuum a table if he is superuser, the table
|
||||
* owner, or the database owner (but in the latter case, only if it's not
|
||||
* a shared relation). pg_class_ownercheck includes the superuser case.
|
||||
* a shared relation). pg_class_ownercheck includes the superuser case.
|
||||
*
|
||||
* Note we choose to treat permissions failure as a WARNING and keep
|
||||
* trying to vacuum the rest of the DB --- is this appropriate?
|
||||
@ -1220,7 +1220,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
/*
* If the relation has a secondary toast rel, vacuum that too while we
* still hold the session lock on the master table. Note however that
* "analyze" will not get done on the toast table. This is good, because
* the toaster always uses hardcoded index access and statistics are
* totally unimportant for toast relations.
*/
@ -1239,7 +1239,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)

/*
* Open all the vacuumable indexes of the given relation, obtaining the
* specified kind of lock on each. Return an array of Relation pointers for
* the indexes into *Irel, and the number of indexes into *nindexes.
*
* We consider an index vacuumable if it is marked insertable (IndexIsReady).
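The contract just described -- hand back an array through *Irel and its length through *nindexes -- is a common C out-parameter idiom. A self-contained sketch with ints standing in for Relation pointers and an even-OID test standing in for IndexIsReady():

#include <stdio.h>
#include <stdlib.h>

static void
open_indexes(const int *all, int nall, int **out, int *nout)
{
    int        *arr = malloc(sizeof(int) * (size_t) (nall > 0 ? nall : 1));
    int         n = 0;

    if (arr == NULL)
    {
        *out = NULL;
        *nout = 0;
        return;
    }
    for (int i = 0; i < nall; i++)
    {
        if (all[i] % 2 == 0)    /* stand-in for the IndexIsReady() test */
            arr[n++] = all[i];
    }
    *out = arr;                 /* array out-parameter, like *Irel */
    *nout = n;                  /* count out-parameter, like *nindexes */
}

int
main(void)
{
    int         oids[] = {16384, 16385, 16386};
    int        *irel;
    int         nindexes;

    open_indexes(oids, 3, &irel, &nindexes);
    printf("opened %d of 3\n", nindexes);       /* prints "opened 2 of 3" */
    free(irel);
    return 0;
}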
@ -1289,7 +1289,7 @@ vac_open_indexes(Relation relation, LOCKMODE lockmode,
}

/*
* Release the resources acquired by vac_open_indexes. Optionally release
* the locks (say NoLock to keep 'em).
*/
void
@ -473,7 +473,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* Before entering the main loop, establish the invariant that
* next_not_all_visible_block is the next block number >= blkno that's not
* all-visible according to the visibility map, or nblocks if there's no
* such block. Also, we set up the skipping_all_visible_blocks flag,
* which is needed because we need hysteresis in the decision: once we've
* started skipping blocks, we may as well skip everything up to the next
* not-all-visible block.
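The hysteresis works by precomputing the next not-all-visible block and committing to skip everything before it once the run is long enough. A runnable sketch of that loop shape, with a bool array faking the visibility map and a demo value for SKIP_PAGES_THRESHOLD (the real constant exists in vacuumlazy.c, but its value here is just for the example):

#include <stdbool.h>
#include <stdio.h>

#define SKIP_PAGES_THRESHOLD 4      /* demo value only */

int
main(void)
{
    bool        vm[] = {true, true, true, true, true, false,
                        true, true, false, true};
    int         nblocks = 10;
    int         next_not_all_visible_block;
    bool        skipping_all_visible_blocks;

    /* Establish the invariant before entering the main loop. */
    next_not_all_visible_block = 0;
    while (next_not_all_visible_block < nblocks &&
           vm[next_not_all_visible_block])
        next_not_all_visible_block++;
    skipping_all_visible_blocks =
        (next_not_all_visible_block >= SKIP_PAGES_THRESHOLD);

    for (int blkno = 0; blkno < nblocks; blkno++)
    {
        if (blkno == next_not_all_visible_block)
        {
            /* Not all-visible: advance the invariant past this block. */
            next_not_all_visible_block = blkno + 1;
            while (next_not_all_visible_block < nblocks &&
                   vm[next_not_all_visible_block])
                next_not_all_visible_block++;
            skipping_all_visible_blocks =
                (next_not_all_visible_block - blkno > SKIP_PAGES_THRESHOLD);
        }
        else if (skipping_all_visible_blocks)
        {
            printf("block %d: skipped (all-visible run)\n", blkno);
            continue;
        }
        printf("block %d: scanned\n", blkno);
    }
    return 0;
}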
@ -706,10 +706,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* It's possible that another backend has extended the heap,
* initialized the page, and then failed to WAL-log the page
* due to an ERROR. Since heap extension is not WAL-logged,
* recovery might try to replay our record setting the
* page all-visible and find that the page isn't initialized,
* which will cause a PANIC. To prevent that, check whether
* the page has been previously WAL-logged, and if not, do that
* recovery might try to replay our record setting the page
* all-visible and find that the page isn't initialized, which
* will cause a PANIC. To prevent that, check whether the
* page has been previously WAL-logged, and if not, do that
* now.
*/
if (RelationNeedsWAL(onerel) &&
@ -834,8 +834,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* NB: Like with per-tuple hint bits, we can't set the
* PD_ALL_VISIBLE flag if the inserter committed
* asynchronously. See SetHintBits for more info. Check
* that the tuple is hinted xmin-committed because
* of that.
* that the tuple is hinted xmin-committed because of
* that.
*/
if (all_visible)
{
@ -972,7 +972,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
/*
* It should never be the case that the visibility map page is set
* while the page-level bit is clear, but the reverse is allowed
* (if checksums are not enabled). Regardless, set the both bits
* so that we get back in sync.
*
* NB: If the heap page is all-visible but the VM bit is not set,
@ -1034,8 +1034,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
/*
* If we remembered any tuples for deletion, then the page will be
* visited again by lazy_vacuum_heap, which will compute and record
* its post-compaction free space. If not, then we're done with this
* page, so remember its free space as-is. (This path will always be
* taken if there are no indexes.)
*/
if (vacrelstats->num_dead_tuples == prev_dead_count)
@ -1635,9 +1635,9 @@ static void
lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
{
long maxtuples;
int vac_work_mem = IsAutoVacuumWorkerProcess() &&
autovacuum_work_mem != -1 ?
autovacuum_work_mem : maintenance_work_mem;

if (vacrelstats->hasindex)
{
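The declaration above picks the vacuum memory budget with one conditional: an autovacuum worker honors autovacuum_work_mem when it is set (not -1), otherwise it falls back to maintenance_work_mem. The same choice as a tiny standalone program, with plain globals standing in for the GUC variables:

#include <stdbool.h>
#include <stdio.h>

static int  autovacuum_work_mem = -1;       /* -1 means "not set" */
static int  maintenance_work_mem = 65536;   /* kB, stand-in default */

static int
vacuum_work_mem(bool is_autovacuum_worker)
{
    return (is_autovacuum_worker && autovacuum_work_mem != -1) ?
        autovacuum_work_mem : maintenance_work_mem;
}

int
main(void)
{
    printf("%d kB\n", vacuum_work_mem(true));   /* 65536: fallback */
    autovacuum_work_mem = 131072;
    printf("%d kB\n", vacuum_work_mem(true));   /* 131072: GUC wins */
    return 0;
}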
@ -176,7 +176,7 @@ check_datestyle(char **newval, void **extra, GucSource source)
}

/*
* Prepare the canonical string to return. GUC wants it malloc'd.
*/
result = (char *) malloc(32);
if (!result)
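GUC string check hooks return their canonical value in storage allocated with plain malloc(), because GUC releases it with free() rather than palloc machinery. A hedged sketch of that convention -- check_toy() is invented for illustration, not the real check_datestyle():

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static bool
check_toy(char **newval)
{
    char       *result = malloc(32);    /* GUC wants it malloc'd */
    size_t      i;

    if (!result)
        return false;
    /* "canonicalize": here just upper-case into the fresh buffer */
    for (i = 0; i + 1 < 32 && (*newval)[i] != '\0'; i++)
        result[i] = (char) toupper((unsigned char) (*newval)[i]);
    result[i] = '\0';

    free(*newval);              /* replace the caller's malloc'd value */
    *newval = result;
    return true;
}

int
main(void)
{
    char       *val = strdup("iso, mdy");

    if (val && check_toy(&val))
        printf("canonical: %s\n", val);     /* prints "ISO, MDY" */
    free(val);
    return 0;
}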
@ -257,7 +257,7 @@ check_timezone(char **newval, void **extra, GucSource source)
if (pg_strncasecmp(*newval, "interval", 8) == 0)
{
/*
* Support INTERVAL 'foo'. This is for SQL spec compliance, not
* because it has any actual real-world usefulness.
*/
const char *valueptr = *newval;
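The pg_strncasecmp(*newval, "interval", 8) test is just a case-insensitive prefix match on the first 8 bytes. A sketch with POSIX strncasecmp() standing in for the portable pg_ wrapper:

#include <stdio.h>
#include <strings.h>

int
main(void)
{
    const char *vals[] = {"INTERVAL '-08:00' hour to minute", "UTC"};

    for (int i = 0; i < 2; i++)
        printf("%-35s -> %s\n", vals[i],
               strncasecmp(vals[i], "interval", 8) == 0 ?
               "interval syntax" : "zone name");
    return 0;
}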
@ -281,7 +281,7 @@ check_timezone(char **newval, void **extra, GucSource source)

/*
* Try to parse it. XXX an invalid interval format will result in
* ereport(ERROR), which is not desirable for GUC. We did what we
* could to guard against this in flatten_set_variable_args, but a
* string coming in from postgresql.conf might contain anything.
*/
@ -466,7 +466,7 @@ show_log_timezone(void)
* We allow idempotent changes (r/w -> r/w and r/o -> r/o) at any time, and
* we also always allow changes from read-write to read-only. However,
* read-only may be changed to read-write only when in a top-level transaction
* that has not yet taken an initial snapshot. Can't do it in a hot standby
* slave, either.
*
* If we are not in a transaction at all, just allow the change; it means
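Those transition rules reduce to a small predicate: the same value is always fine, tightening to read-only is always fine, and loosening to read-write needs a transaction with no snapshot yet and no hot standby. A sketch with booleans standing in for the real state tests:

#include <stdbool.h>
#include <stdio.h>

static bool
allow_read_only_change(bool cur_ro, bool new_ro,
                       bool have_snapshot, bool in_hot_standby)
{
    if (cur_ro == new_ro)
        return true;            /* idempotent change */
    if (new_ro)
        return true;            /* read-write -> read-only: always OK */
    /* read-only -> read-write: no snapshot taken yet, no hot standby */
    return !have_snapshot && !in_hot_standby;
}

int
main(void)
{
    /* r/o -> r/w after a snapshot exists: rejected (prints 0) */
    printf("%d\n", allow_read_only_change(true, false, true, false));
    /* r/w -> r/o mid-transaction: allowed (prints 1) */
    printf("%d\n", allow_read_only_change(false, true, true, false));
    return 0;
}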
@ -627,7 +627,7 @@ check_transaction_deferrable(bool *newval, void **extra, GucSource source)
*
* We can't roll back the random sequence on error, and we don't want
* config file reloads to affect it, so we only want interactive SET SEED
* commands to set it. We use the "extra" storage to ensure that rollbacks
* don't try to do the operation again.
*/

@ -903,7 +903,7 @@ const char *
show_role(void)
{
/*
* Check whether SET ROLE is active; if not return "none". This is a
* kluge to deal with the fact that SET SESSION AUTHORIZATION logically
* resets SET ROLE to NONE, but we cannot set the GUC role variable from
* assign_session_authorization (because we haven't got enough info to
@ -52,7 +52,7 @@ validateWithCheckOption(char *value)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid value for \"check_option\" option"),
errdetail("Valid values are \"local\", and \"cascaded\".")));
}
}

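Stripped of the ereport plumbing, the validation above is a whitelist check against exactly two strings. A standalone sketch, with fprintf/exit standing in for ereport(ERROR):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
validate_with_check_option(const char *value)
{
    if (value == NULL ||
        (strcmp(value, "local") != 0 && strcmp(value, "cascaded") != 0))
    {
        fprintf(stderr, "invalid value for \"check_option\" option\n");
        fprintf(stderr, "Valid values are \"local\", and \"cascaded\".\n");
        exit(1);
    }
}

int
main(void)
{
    validate_with_check_option("local");    /* passes silently */
    validate_with_check_option("casual");   /* reports and exits */
    return 0;
}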
@ -344,11 +344,11 @@ UpdateRangeTableOfViewParse(Oid viewOid, Query *viewParse)
*rt_entry2;

/*
* Make a copy of the given parsetree. It's not so much that we don't
* want to scribble on our input, it's that the parser has a bad habit of
* outputting multiple links to the same subtree for constructs like
* BETWEEN, and we mustn't have OffsetVarNodes increment the varno of a
* Var node twice. copyObject will expand any multiply-referenced subtree
* into multiple copies.
*/
viewParse = (Query *) copyObject(viewParse);
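The comment's point is observable in miniature: if two parent links share one child node, an in-place mutation pass increments that child twice, while a deep copy first expands the sharing so each link owns its node. Toy tree and toy OffsetVarNodes(), not the real Node machinery:

#include <stdio.h>
#include <stdlib.h>

typedef struct Node
{
    int         varno;
    struct Node *left;
    struct Node *right;
} Node;

static Node *
copy_tree(const Node *n)        /* toy analogue of copyObject() */
{
    if (n == NULL)
        return NULL;

    Node       *c = malloc(sizeof(Node));

    if (c == NULL)
        abort();
    c->varno = n->varno;
    c->left = copy_tree(n->left);   /* shared subtrees become copies */
    c->right = copy_tree(n->right);
    return c;
}

static void
offset_varnos(Node *n, int delta)   /* toy analogue of OffsetVarNodes() */
{
    if (n == NULL)
        return;
    n->varno += delta;
    offset_varnos(n->left, delta);
    offset_varnos(n->right, delta);
}

int
main(void)
{
    Node        shared = {1, NULL, NULL};
    Node        root = {0, &shared, &shared};   /* BETWEEN-style sharing */
    Node       *safe = copy_tree(&root);

    offset_varnos(safe, 2);
    printf("copy:   %d %d\n", safe->left->varno, safe->right->varno);  /* 3 3 */

    offset_varnos(&root, 2);
    printf("shared: %d %d\n", root.left->varno, root.right->varno);    /* 5 5 */
    return 0;
}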
@ -460,13 +460,13 @@ DefineView(ViewStmt *stmt, const char *queryString)
}

/*
* If the check option is specified, look to see if the view is
* actually auto-updatable or not.
* If the check option is specified, look to see if the view is actually
* auto-updatable or not.
*/
if (check_option)
{
const char *view_updatable_error =
view_query_is_auto_updatable(viewParse, true);

if (view_updatable_error)
ereport(ERROR,
@ -513,7 +513,7 @@ DefineView(ViewStmt *stmt, const char *queryString)

/*
* If the user didn't explicitly ask for a temporary view, check whether
* we need one implicitly. We allow TEMP to be inserted automatically as
* long as the CREATE command is consistent with that --- no explicit
* schema name.
*/