Add the ability to create indexes 'concurrently', that is, without
blocking concurrent writes to the table. Greg Stark, with a little help from Tom Lane.
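In SQL terms, the commit adds a CONCURRENTLY option to CREATE INDEX. A minimal sketch of the new syntax (the table and column names here are illustrative, not part of the commit):

    CREATE INDEX CONCURRENTLY orders_customer_idx ON orders (customer_id);

The concurrent form takes only ShareUpdateExclusiveLock on the table, so INSERT/UPDATE/DELETE keep running during the build, while VACUUM is still locked out (see the index_create() and DefineIndex() changes below).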
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.142 2006/07/25 19:13:00 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.143 2006/08/25 04:06:46 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -244,8 +244,33 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
}

/*
* Otherwise we have a definite conflict.
* Otherwise we have a definite conflict. But before
* complaining, look to see if the tuple we want to insert
* is itself now committed dead --- if so, don't complain.
* This is a waste of time in normal scenarios but we must
* do it to support CREATE INDEX CONCURRENTLY.
*/
htup.t_self = itup->t_tid;
if (heap_fetch(heapRel, SnapshotSelf, &htup, &hbuffer,
false, NULL))
{
/* Normal case --- it's still live */
ReleaseBuffer(hbuffer);
}
else if (htup.t_data != NULL)
{
/*
* It's been deleted, so no error, and no need to
* continue searching
*/
break;
}
else
{
/* couldn't find the tuple?? */
elog(ERROR, "failed to fetch tuple being inserted");
}

ereport(ERROR,
(errcode(ERRCODE_UNIQUE_VIOLATION),
errmsg("duplicate key violates unique constraint \"%s\"",

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/bootstrap/bootparse.y,v 1.83 2006/07/31 01:16:36 tgl Exp $
* $PostgreSQL: pgsql/src/backend/bootstrap/bootparse.y,v 1.84 2006/08/25 04:06:46 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -257,7 +257,7 @@ Boot_DeclareIndexStmt:
$10,
NULL, NIL, NIL,
false, false, false,
false, false, true, false);
false, false, true, false, false);
do_end();
}
;
@@ -275,7 +275,7 @@ Boot_DeclareUniqueIndexStmt:
$11,
NULL, NIL, NIL,
true, false, false,
false, false, true, false);
false, false, true, false, false);
do_end();
}
;

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.272 2006/07/31 20:09:00 tgl Exp $
* $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.273 2006/08/25 04:06:46 tgl Exp $
*
*
* INTERFACE ROUTINES
@@ -34,6 +34,7 @@
#include "catalog/index.h"
#include "catalog/indexing.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_opclass.h"
#include "catalog/pg_type.h"
#include "executor/executor.h"
@@ -49,8 +50,17 @@
#include "utils/memutils.h"
#include "utils/relcache.h"
#include "utils/syscache.h"
#include "utils/tuplesort.h"


/* state info for validate_index bulkdelete callback */
typedef struct
{
Tuplesortstate *tuplesort; /* for sorting the index TIDs */
/* statistics (for debug purposes only): */
double htups, itups, tups_inserted;
} v_i_state;

/* non-export function prototypes */
static TupleDesc ConstructTupleDescriptor(Relation heapRelation,
IndexInfo *indexInfo,
@@ -61,9 +71,16 @@ static void AppendAttributeTuples(Relation indexRelation, int numatts);
static void UpdateIndexRelation(Oid indexoid, Oid heapoid,
IndexInfo *indexInfo,
Oid *classOids,
bool primary);
bool primary,
bool isvalid);
static void index_update_stats(Relation rel, bool hasindex, bool isprimary,
Oid reltoastidxid, double reltuples);
static bool validate_index_callback(ItemPointer itemptr, void *opaque);
static void validate_index_heapscan(Relation heapRelation,
Relation indexRelation,
IndexInfo *indexInfo,
Snapshot snapshot,
v_i_state *state);
static Oid IndexGetRelation(Oid indexId);


@@ -308,6 +325,8 @@ AppendAttributeTuples(Relation indexRelation, int numatts)

/* ----------------------------------------------------------------
* UpdateIndexRelation
*
* Construct and insert a new entry in the pg_index catalog
* ----------------------------------------------------------------
*/
static void
@@ -315,7 +334,8 @@ UpdateIndexRelation(Oid indexoid,
Oid heapoid,
IndexInfo *indexInfo,
Oid *classOids,
bool primary)
bool primary,
bool isvalid)
{
int2vector *indkey;
oidvector *indclass;
@@ -383,6 +403,7 @@ UpdateIndexRelation(Oid indexoid,
values[Anum_pg_index_indisunique - 1] = BoolGetDatum(indexInfo->ii_Unique);
values[Anum_pg_index_indisprimary - 1] = BoolGetDatum(primary);
values[Anum_pg_index_indisclustered - 1] = BoolGetDatum(false);
values[Anum_pg_index_indisvalid - 1] = BoolGetDatum(isvalid);
values[Anum_pg_index_indkey - 1] = PointerGetDatum(indkey);
values[Anum_pg_index_indclass - 1] = PointerGetDatum(indclass);
values[Anum_pg_index_indexprs - 1] = exprsDatum;
@@ -427,7 +448,10 @@ UpdateIndexRelation(Oid indexoid,
* isconstraint: index is owned by a PRIMARY KEY or UNIQUE constraint
* allow_system_table_mods: allow table to be a system catalog
* skip_build: true to skip the index_build() step for the moment; caller
* must do it later (typically via reindex_index())
* must do it later (typically via reindex_index())
* concurrent: if true, do not lock the table against writers. The index
* will be marked "invalid" and the caller must take additional steps
* to fix it up.
*
* Returns OID of the created index.
*/
@@ -443,7 +467,8 @@ index_create(Oid heapRelationId,
bool isprimary,
bool isconstraint,
bool allow_system_table_mods,
bool skip_build)
bool skip_build,
bool concurrent)
{
Relation pg_class;
Relation heapRelation;
@@ -456,9 +481,12 @@ index_create(Oid heapRelationId,
pg_class = heap_open(RelationRelationId, RowExclusiveLock);

/*
* Only SELECT ... FOR UPDATE/SHARE are allowed while doing this
* Only SELECT ... FOR UPDATE/SHARE are allowed while doing a standard
* index build; but for concurrent builds we allow INSERT/UPDATE/DELETE
* (but not VACUUM).
*/
heapRelation = heap_open(heapRelationId, ShareLock);
heapRelation = heap_open(heapRelationId,
(concurrent ? ShareUpdateExclusiveLock : ShareLock));

/*
* The index will be in the same namespace as its parent table, and is
@@ -480,6 +508,16 @@ index_create(Oid heapRelationId,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("user-defined indexes on system catalog tables are not supported")));

/*
* concurrent index build on a system catalog is unsafe because we tend
* to release locks before committing in catalogs
*/
if (concurrent &&
IsSystemRelation(heapRelation))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("concurrent index creation on system catalog tables is not supported")));

/*
* We cannot allow indexing a shared relation after initdb (because
* there's no way to make the entry in other databases' pg_class).
@@ -578,7 +616,7 @@ index_create(Oid heapRelationId,
* ----------------
*/
UpdateIndexRelation(indexRelationId, heapRelationId, indexInfo,
classObjectId, isprimary);
classObjectId, isprimary, !concurrent);

/*
* Register constraint and dependencies for the index.
@@ -745,9 +783,8 @@ index_create(Oid heapRelationId,
}

/*
* Close the heap and index; but we keep the ShareLock on the heap and
* the exclusive lock on the index that we acquired above, until end of
* transaction.
* Close the heap and index; but we keep the locks that we acquired above
* until end of transaction.
*/
index_close(indexRelation, NoLock);
heap_close(heapRelation, NoLock);
@@ -895,6 +932,7 @@ BuildIndexInfo(Relation index)

/* other info */
ii->ii_Unique = indexStruct->indisunique;
ii->ii_Concurrent = false; /* assume normal case */

return ii;
}
@@ -1327,13 +1365,22 @@ IndexBuildHeapScan(Relation heapRelation,
estate);

/*
* Ok, begin our scan of the base relation. We use SnapshotAny because we
* must retrieve all tuples and do our own time qual checks.
* Prepare for scan of the base relation. In a normal index build,
* we use SnapshotAny because we must retrieve all tuples and do our own
* time qual checks (because we have to index RECENTLY_DEAD tuples).
* In a concurrent build, we take a regular MVCC snapshot and index
* whatever's live according to that. During bootstrap we just use
* SnapshotNow.
*/
if (IsBootstrapProcessingMode())
{
snapshot = SnapshotNow;
OldestXmin = InvalidTransactionId;
OldestXmin = InvalidTransactionId; /* not used */
}
else if (indexInfo->ii_Concurrent)
{
snapshot = CopySnapshot(GetTransactionSnapshot());
OldestXmin = InvalidTransactionId; /* not used */
}
else
{
@@ -1344,8 +1391,8 @@ IndexBuildHeapScan(Relation heapRelation,

scan = heap_beginscan(heapRelation, /* relation */
snapshot, /* seeself */
0, /* number of keys */
NULL); /* scan key */
0, /* number of keys */
NULL); /* scan key */

reltuples = 0;

@@ -1374,10 +1421,12 @@ IndexBuildHeapScan(Relation heapRelation,
scan->rs_cbuf))
{
case HEAPTUPLE_DEAD:
/* Definitely dead, we can ignore it */
indexIt = false;
tupleIsAlive = false;
break;
case HEAPTUPLE_LIVE:
/* Normal case, index and unique-check it */
indexIt = true;
tupleIsAlive = true;
break;
@@ -1388,6 +1437,7 @@ IndexBuildHeapScan(Relation heapRelation,
* anyway to preserve MVCC semantics. (Pre-existing
* transactions could try to use the index after we
* finish building it, and may need to see such tuples.)
* Exclude it from unique-checking, however.
*/
indexIt = true;
tupleIsAlive = false;
@@ -1499,6 +1549,309 @@ IndexBuildHeapScan(Relation heapRelation,
}


/*
* validate_index - support code for concurrent index builds
*
* We do a concurrent index build by first building the index normally via
* index_create(), while holding a weak lock that allows concurrent
* insert/update/delete. Also, we index only tuples that are valid
* as of the start of the scan (see IndexBuildHeapScan), whereas a normal
* build takes care to include recently-dead tuples. This is OK because
* we won't mark the index valid until all transactions that might be able
* to see those tuples are gone. The reason for doing that is to avoid
* bogus unique-index failures due to concurrent UPDATEs (we might see
* different versions of the same row as being valid when we pass over them,
* if we used HeapTupleSatisfiesVacuum). This leaves us with an index that
* does not contain any tuples added to the table while we built the index.
*
* Next, we commit the transaction so that the index becomes visible to other
* backends, but it is marked not "indisvalid" to prevent the planner from
* relying on it for indexscans. Then we wait for all transactions that
* could have been modifying the table to terminate. At this point we
* know that any subsequently-started transactions will see the index and
* insert their new tuples into it. We then take a new reference snapshot
* which is passed to validate_index(). Any tuples that are valid according
* to this snap, but are not in the index, must be added to the index.
* (Any tuples committed live after the snap will be inserted into the
* index by their originating transaction. Any tuples committed dead before
* the snap need not be indexed, because we will wait out all transactions
* that might care about them before we mark the index valid.)
*
* validate_index() works by first gathering all the TIDs currently in the
* index, using a bulkdelete callback that just stores the TIDs and doesn't
* ever say "delete it". (This should be faster than a plain indexscan;
* also, not all index AMs support full-index indexscan.) Then we sort the
* TIDs, and finally scan the table doing a "merge join" against the TID list
* to see which tuples are missing from the index. Thus we will ensure that
* all tuples valid according to the reference snapshot are in the index.
*
* Building a unique index this way is tricky: we might try to insert a
* tuple that is already dead or is in process of being deleted, and we
* mustn't have a uniqueness failure against an updated version of the same
* row. We can check the tuple to see if it's already dead and tell
* index_insert() not to do the uniqueness check, but that still leaves us
* with a race condition against an in-progress update. To handle that,
* we expect the index AM to recheck liveness of the to-be-inserted tuple
* before it declares a uniqueness error.
*
* After completing validate_index(), we wait until all transactions that
* were alive at the time of the reference snapshot are gone; this is
* necessary to be sure there are none left with a serializable snapshot
* older than the reference (and hence possibly able to see tuples we did
* not index). Then we mark the index valid and commit.
*
* Doing two full table scans is a brute-force strategy. We could try to be
* cleverer, eg storing new tuples in a special area of the table (perhaps
* making the table append-only by setting use_fsm). However that would
* add yet more locking issues.
*/
void
validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
{
Relation heapRelation, indexRelation;
IndexInfo *indexInfo;
IndexVacuumInfo ivinfo;
v_i_state state;

/* Open and lock the parent heap relation */
heapRelation = heap_open(heapId, ShareUpdateExclusiveLock);
/* And the target index relation */
indexRelation = index_open(indexId, RowExclusiveLock);

/*
* Fetch info needed for index_insert. (You might think this should
* be passed in from DefineIndex, but its copy is long gone due to
* having been built in a previous transaction.)
*/
indexInfo = BuildIndexInfo(indexRelation);

/* mark build is concurrent just for consistency */
indexInfo->ii_Concurrent = true;

/*
* Scan the index and gather up all the TIDs into a tuplesort object.
*/
ivinfo.index = indexRelation;
ivinfo.vacuum_full = false;
ivinfo.message_level = DEBUG2;
ivinfo.num_heap_tuples = -1;

state.tuplesort = tuplesort_begin_datum(TIDOID,
TIDLessOperator,
maintenance_work_mem,
false);
state.htups = state.itups = state.tups_inserted = 0;

(void) index_bulk_delete(&ivinfo, NULL,
validate_index_callback, (void *) &state);

/* Execute the sort */
tuplesort_performsort(state.tuplesort);

/*
* Now scan the heap and "merge" it with the index
*/
validate_index_heapscan(heapRelation,
indexRelation,
indexInfo,
snapshot,
&state);

/* Done with tuplesort object */
tuplesort_end(state.tuplesort);

elog(DEBUG2,
"validate_index found %.0f heap tuples, %.0f index tuples; inserted %.0f missing tuples",
state.htups, state.itups, state.tups_inserted);

/* Close rels, but keep locks */
index_close(indexRelation, NoLock);
heap_close(heapRelation, NoLock);
}

/*
* validate_index_callback - bulkdelete callback to collect the index TIDs
*/
static bool
validate_index_callback(ItemPointer itemptr, void *opaque)
{
v_i_state *state = (v_i_state *) opaque;

tuplesort_putdatum(state->tuplesort, PointerGetDatum(itemptr), false);
state->itups += 1;
return false; /* never actually delete anything */
}

/*
* validate_index_heapscan - second table scan for concurrent index build
*
* This has much code in common with IndexBuildHeapScan, but it's enough
* different that it seems cleaner to have two routines not one.
*/
static void
validate_index_heapscan(Relation heapRelation,
Relation indexRelation,
IndexInfo *indexInfo,
Snapshot snapshot,
v_i_state *state)
{
HeapScanDesc scan;
HeapTuple heapTuple;
Datum values[INDEX_MAX_KEYS];
bool isnull[INDEX_MAX_KEYS];
List *predicate;
TupleTableSlot *slot;
EState *estate;
ExprContext *econtext;
/* state variables for the merge */
ItemPointer indexcursor = NULL;
bool tuplesort_empty = false;

/*
* sanity checks
*/
Assert(OidIsValid(indexRelation->rd_rel->relam));

/*
* Need an EState for evaluation of index expressions and partial-index
* predicates. Also a slot to hold the current tuple.
*/
estate = CreateExecutorState();
econtext = GetPerTupleExprContext(estate);
slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRelation));

/* Arrange for econtext's scan tuple to be the tuple under test */
econtext->ecxt_scantuple = slot;

/* Set up execution state for predicate, if any. */
predicate = (List *)
ExecPrepareExpr((Expr *) indexInfo->ii_Predicate,
estate);

/*
* Prepare for scan of the base relation. We need just those tuples
* satisfying the passed-in reference snapshot.
*/
scan = heap_beginscan(heapRelation, /* relation */
snapshot, /* seeself */
0, /* number of keys */
NULL); /* scan key */

/*
* Scan all tuples matching the snapshot.
*/
while ((heapTuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
ItemPointer heapcursor = &heapTuple->t_self;

CHECK_FOR_INTERRUPTS();

state->htups += 1;

/*
* "merge" by skipping through the index tuples until we find or
* pass the current heap tuple.
*/
while (!tuplesort_empty &&
(!indexcursor ||
ItemPointerCompare(indexcursor, heapcursor) < 0))
{
Datum ts_val;
bool ts_isnull;

if (indexcursor)
pfree(indexcursor);
tuplesort_empty = !tuplesort_getdatum(state->tuplesort, true,
&ts_val, &ts_isnull);
Assert(tuplesort_empty || !ts_isnull);
indexcursor = (ItemPointer) DatumGetPointer(ts_val);
}

if (tuplesort_empty ||
ItemPointerCompare(indexcursor, heapcursor) > 0)
{
/*
* We've overshot which means this heap tuple is missing from the
* index, so insert it.
*/
bool check_unique;

MemoryContextReset(econtext->ecxt_per_tuple_memory);

/* Set up for predicate or expression evaluation */
ExecStoreTuple(heapTuple, slot, InvalidBuffer, false);

/*
* In a partial index, discard tuples that don't satisfy the
* predicate.
*/
if (predicate != NIL)
{
if (!ExecQual(predicate, econtext, false))
continue;
}

/*
* For the current heap tuple, extract all the attributes we use
* in this index, and note which are null. This also performs
* evaluation of any expressions needed.
*/
FormIndexDatum(indexInfo,
slot,
estate,
values,
isnull);

/*
* If the tuple is already committed dead, we still have to
* put it in the index (because some xacts might be able to
* see it), but we might as well suppress uniqueness checking.
* This is just an optimization because the index AM is not
* supposed to raise a uniqueness failure anyway.
*/
if (indexInfo->ii_Unique)
{
/* must hold a buffer lock to call HeapTupleSatisfiesNow */
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);

if (HeapTupleSatisfiesNow(heapTuple->t_data, scan->rs_cbuf))
check_unique = true;
else
check_unique = false;

LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
}
else
check_unique = false;

/*
* You'd think we should go ahead and build the index tuple here,
* but some index AMs want to do further processing on the data
* first. So pass the values[] and isnull[] arrays, instead.
*/
index_insert(indexRelation,
values,
isnull,
heapcursor,
heapRelation,
check_unique);

state->tups_inserted += 1;
}
}

heap_endscan(scan);

ExecDropSingleTupleTableSlot(slot);

FreeExecutorState(estate);

/* These may have been pointing to the now-gone estate */
indexInfo->ii_ExpressionsState = NIL;
indexInfo->ii_PredicateState = NIL;
}


/*
* IndexGetRelation: given an index's relation OID, get the OID of the
* relation it is an index on. Uses the system cache.
@@ -1530,9 +1883,12 @@ void
reindex_index(Oid indexId)
{
Relation iRel,
heapRelation;
heapRelation,
pg_index;
Oid heapId;
bool inplace;
HeapTuple indexTuple;
Form_pg_index indexForm;

/*
* Open and lock the parent heap relation. ShareLock is sufficient since
@@ -1600,6 +1956,28 @@ reindex_index(Oid indexId)
PG_END_TRY();
ResetReindexProcessing();

/*
* If the index is marked invalid (ie, it's from a failed CREATE INDEX
* CONCURRENTLY), we can now mark it valid. This allows REINDEX to be
* used to clean up in such cases.
*/
pg_index = heap_open(IndexRelationId, RowExclusiveLock);

indexTuple = SearchSysCacheCopy(INDEXRELID,
ObjectIdGetDatum(indexId),
0, 0, 0);
if (!HeapTupleIsValid(indexTuple))
elog(ERROR, "cache lookup failed for index %u", indexId);
indexForm = (Form_pg_index) GETSTRUCT(indexTuple);

if (!indexForm->indisvalid)
{
indexForm->indisvalid = true;
simple_heap_update(pg_index, &indexTuple->t_self, indexTuple);
CatalogUpdateIndexes(pg_index, indexTuple);
}
heap_close(pg_index, RowExclusiveLock);

/* Close rels, but keep locks */
index_close(iRel, NoLock);
heap_close(heapRelation, NoLock);

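The reindex_index() change above means an index left "invalid" by a failed concurrent build can be repaired later. A hedged sketch, assuming a hypothetical index name:

    REINDEX INDEX orders_customer_idx;   -- rebuilding clears the invalid state
    -- alternatively, the half-built index can simply be dropped:
    DROP INDEX orders_customer_idx;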
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/toasting.c,v 1.1 2006/07/31 01:16:37 tgl Exp $
* $PostgreSQL: pgsql/src/backend/catalog/toasting.c,v 1.2 2006/08/25 04:06:47 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -218,6 +218,7 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid)
indexInfo->ii_Predicate = NIL;
indexInfo->ii_PredicateState = NIL;
indexInfo->ii_Unique = true;
indexInfo->ii_Concurrent = false;

classObjectId[0] = OID_BTREE_OPS_OID;
classObjectId[1] = INT4_BTREE_OPS_OID;
@@ -227,7 +228,7 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid)
BTREE_AM_OID,
rel->rd_rel->reltablespace,
classObjectId, (Datum) 0,
true, false, true, false);
true, false, true, false, false);

/*
* Store the toast table's OID in the parent relation's pg_class row

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.146 2006/07/31 01:16:37 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.147 2006/08/25 04:06:48 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -18,6 +18,7 @@
#include "access/genam.h"
#include "access/heapam.h"
#include "access/reloptions.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/catalog.h"
#include "catalog/dependency.h"
@@ -85,6 +86,7 @@ static bool relationHasPrimaryKey(Relation rel);
* 'skip_build': make the catalog entries but leave the index file empty;
* it will be filled later.
* 'quiet': suppress the NOTICE chatter ordinarily provided for constraints.
* 'concurrent': avoid blocking writers to the table while building.
*/
void
DefineIndex(RangeVar *heapRelation,
@@ -102,7 +104,8 @@ DefineIndex(RangeVar *heapRelation,
bool is_alter_table,
bool check_rights,
bool skip_build,
bool quiet)
bool quiet,
bool concurrent)
{
Oid *classObjectId;
Oid accessMethodId;
@@ -116,6 +119,12 @@ DefineIndex(RangeVar *heapRelation,
Datum reloptions;
IndexInfo *indexInfo;
int numberOfAttributes;
uint32 ixcnt;
LockRelId heaprelid;
Snapshot snapshot;
Relation pg_index;
HeapTuple indexTuple;
Form_pg_index indexForm;

/*
* count attributes in index
@@ -133,8 +142,16 @@ DefineIndex(RangeVar *heapRelation,

/*
* Open heap relation, acquire a suitable lock on it, remember its OID
*
* Only SELECT ... FOR UPDATE/SHARE are allowed while doing a standard
* index build; but for concurrent builds we allow INSERT/UPDATE/DELETE
* (but not VACUUM).
*/
rel = heap_openrv(heapRelation, ShareLock);
rel = heap_openrv(heapRelation,
(concurrent ? ShareUpdateExclusiveLock : ShareLock));

relationId = RelationGetRelid(rel);
namespaceId = RelationGetNamespace(rel);

/* Note: during bootstrap may see uncataloged relation */
if (rel->rd_rel->relkind != RELKIND_RELATION &&
@@ -144,8 +161,13 @@ DefineIndex(RangeVar *heapRelation,
errmsg("\"%s\" is not a table",
heapRelation->relname)));

relationId = RelationGetRelid(rel);
namespaceId = RelationGetNamespace(rel);
/*
* Don't try to CREATE INDEX on temp tables of other backends.
*/
if (isOtherTempNamespace(namespaceId))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot create indexes on temporary tables of other sessions")));

/*
* Verify we (still) have CREATE rights in the rel's namespace.
@@ -391,6 +413,7 @@ DefineIndex(RangeVar *heapRelation,
indexInfo->ii_Predicate = make_ands_implicit(predicate);
indexInfo->ii_PredicateState = NIL;
indexInfo->ii_Unique = unique;
indexInfo->ii_Concurrent = concurrent;

classObjectId = (Oid *) palloc(numberOfAttributes * sizeof(Oid));
ComputeIndexAttrs(indexInfo, classObjectId, attributeList,
@@ -410,10 +433,122 @@ DefineIndex(RangeVar *heapRelation,
primary ? "PRIMARY KEY" : "UNIQUE",
indexRelationName, RelationGetRelationName(rel))));

index_create(relationId, indexRelationName, indexRelationId,
indexInfo, accessMethodId, tablespaceId, classObjectId,
reloptions, primary, isconstraint,
allowSystemTableMods, skip_build);
indexRelationId =
index_create(relationId, indexRelationName, indexRelationId,
indexInfo, accessMethodId, tablespaceId, classObjectId,
reloptions, primary, isconstraint,
allowSystemTableMods, skip_build, concurrent);

if (!concurrent)
return; /* We're done, in the standard case */

/*
* Phase 2 of concurrent index build (see comments for validate_index()
* for an overview of how this works)
*
* We must commit our current transaction so that the index becomes
* visible; then start another. Note that all the data structures
* we just built are lost in the commit. The only data we keep past
* here are the relation IDs.
*
* Before committing, get a session-level lock on the table, to ensure
* that neither it nor the index can be dropped before we finish.
* This cannot block, even if someone else is waiting for access, because
* we already have the same lock within our transaction.
*
* Note: we don't currently bother with a session lock on the index,
* because there are no operations that could change its state while
* we hold lock on the parent table. This might need to change later.
*/
heaprelid = rel->rd_lockInfo.lockRelId;
LockRelationIdForSession(&heaprelid, ShareUpdateExclusiveLock);

CommitTransactionCommand();
StartTransactionCommand();

/* Establish transaction snapshot ... else GetLatestSnapshot complains */
(void) GetTransactionSnapshot();

/*
* Now we must wait until no running transaction could have the table open
* with the old list of indexes. If we can take an exclusive lock then
* there are none now and anybody who opens it later will get the new
* index in their relcache entry. Alternatively, if our Xmin reaches our
* own (new) transaction then we know no transactions that started before
* the index was visible are left anyway.
*/
for (;;)
{
CHECK_FOR_INTERRUPTS();

if (ConditionalLockRelationOid(relationId, ExclusiveLock))
{
/* Release the lock right away to avoid blocking anyone */
UnlockRelationOid(relationId, ExclusiveLock);
break;
}

if (TransactionIdEquals(GetLatestSnapshot()->xmin,
GetTopTransactionId()))
break;

pg_usleep(1000000L); /* 1 sec */
}

/*
* Now take the "reference snapshot" that will be used by validate_index()
* to filter candidate tuples. All other transactions running at this
* time will have to be out-waited before we can commit, because we can't
* guarantee that tuples deleted just before this will be in the index.
*
* We also set ActiveSnapshot to this snap, since functions in indexes
* may need a snapshot.
*/
snapshot = CopySnapshot(GetTransactionSnapshot());
ActiveSnapshot = snapshot;

/*
* Scan the index and the heap, insert any missing index entries.
*/
validate_index(relationId, indexRelationId, snapshot);

/*
* The index is now valid in the sense that it contains all currently
* interesting tuples. But since it might not contain tuples deleted
* just before the reference snap was taken, we have to wait out any
* transactions older than the reference snap. We can do this by
* waiting for each xact explicitly listed in the snap.
*
* Note: GetSnapshotData() never stores our own xid into a snap,
* hence we need not check for that.
*/
for (ixcnt = 0; ixcnt < snapshot->xcnt; ixcnt++)
XactLockTableWait(snapshot->xip[ixcnt]);

/* Index can now be marked valid -- update its pg_index entry */
pg_index = heap_open(IndexRelationId, RowExclusiveLock);

indexTuple = SearchSysCacheCopy(INDEXRELID,
ObjectIdGetDatum(indexRelationId),
0, 0, 0);
if (!HeapTupleIsValid(indexTuple))
elog(ERROR, "cache lookup failed for index %u", indexRelationId);
indexForm = (Form_pg_index) GETSTRUCT(indexTuple);

Assert(indexForm->indexrelid = indexRelationId);
Assert(!indexForm->indisvalid);

indexForm->indisvalid = true;

simple_heap_update(pg_index, &indexTuple->t_self, indexTuple);
CatalogUpdateIndexes(pg_index, indexTuple);

heap_close(pg_index, RowExclusiveLock);

/*
* Last thing to do is release the session-level lock on the parent table.
*/
UnlockRelationIdForSession(&heaprelid, ShareUpdateExclusiveLock);
}

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.200 2006/08/21 00:57:24 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.201 2006/08/25 04:06:48 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -3832,7 +3832,8 @@ ATExecAddIndex(AlteredTableInfo *tab, Relation rel,
true, /* is_alter_table */
check_rights,
skip_build,
quiet);
quiet,
false);
}

/*

@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.348 2006/08/21 00:57:24 tgl Exp $
* $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.349 2006/08/25 04:06:49 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -2049,6 +2049,7 @@ _copyIndexStmt(IndexStmt *from)
COPY_SCALAR_FIELD(unique);
COPY_SCALAR_FIELD(primary);
COPY_SCALAR_FIELD(isconstraint);
COPY_SCALAR_FIELD(concurrent);

return newnode;
}

@@ -18,7 +18,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.282 2006/08/21 00:57:24 tgl Exp $
* $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.283 2006/08/25 04:06:49 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -962,6 +962,7 @@ _equalIndexStmt(IndexStmt *a, IndexStmt *b)
COMPARE_SCALAR_FIELD(unique);
COMPARE_SCALAR_FIELD(primary);
COMPARE_SCALAR_FIELD(isconstraint);
COMPARE_SCALAR_FIELD(concurrent);

return true;
}

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/nodes/outfuncs.c,v 1.283 2006/08/21 00:57:24 tgl Exp $
* $PostgreSQL: pgsql/src/backend/nodes/outfuncs.c,v 1.284 2006/08/25 04:06:50 tgl Exp $
*
* NOTES
* Every node type that can appear in stored rules' parsetrees *must*
@@ -1353,6 +1353,7 @@ _outIndexStmt(StringInfo str, IndexStmt *node)
WRITE_BOOL_FIELD(unique);
WRITE_BOOL_FIELD(primary);
WRITE_BOOL_FIELD(isconstraint);
WRITE_BOOL_FIELD(concurrent);
}

static void

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.124 2006/08/05 00:22:49 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.125 2006/08/25 04:06:50 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -138,6 +138,18 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, RelOptInfo *rel)
indexRelation = index_open(indexoid, lmode);
index = indexRelation->rd_index;

/*
* Ignore invalid indexes, since they can't safely be used for
* queries. Note that this is OK because the data structure
* we are constructing is only used by the planner --- the
* executor still needs to insert into "invalid" indexes!
*/
if (!index->indisvalid)
{
index_close(indexRelation, NoLock);
continue;
}

info = makeNode(IndexOptInfo);

info->indexoid = index->indexrelid;

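Since the planner now skips indexes whose pg_index.indisvalid flag is false, indexes left invalid by a failed concurrent build are easy to spot from SQL; a hypothetical query using the new column:

    SELECT indexrelid::regclass AS index_name
    FROM pg_index
    WHERE NOT indisvalid;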
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/parser/analyze.c,v 1.347 2006/08/21 00:57:24 tgl Exp $
* $PostgreSQL: pgsql/src/backend/parser/analyze.c,v 1.348 2006/08/25 04:06:51 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1488,6 +1488,7 @@ transformIndexConstraints(ParseState *pstate, CreateStmtContext *cxt)
index->tableSpace = constraint->indexspace;
index->indexParams = NIL;
index->whereClause = NULL;
index->concurrent = false;

/*
* Make sure referenced keys exist. If we are making a PRIMARY KEY

@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/parser/gram.y,v 2.557 2006/08/21 00:57:25 tgl Exp $
* $PostgreSQL: pgsql/src/backend/parser/gram.y,v 2.558 2006/08/25 04:06:51 tgl Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@@ -364,7 +364,8 @@ static void doNegateFloat(Value *v);
CACHE CALLED CASCADE CASCADED CASE CAST CHAIN CHAR_P
CHARACTER CHARACTERISTICS CHECK CHECKPOINT CLASS CLOSE
CLUSTER COALESCE COLLATE COLUMN COMMENT COMMIT
COMMITTED CONNECTION CONSTRAINT CONSTRAINTS CONVERSION_P CONVERT COPY CREATE CREATEDB
COMMITTED CONCURRENTLY CONNECTION CONSTRAINT CONSTRAINTS
CONVERSION_P CONVERT COPY CREATE CREATEDB
CREATEROLE CREATEUSER CROSS CSV CURRENT_DATE CURRENT_ROLE CURRENT_TIME
CURRENT_TIMESTAMP CURRENT_USER CURSOR CYCLE

@@ -3638,20 +3639,22 @@ opt_granted_by: GRANTED BY RoleId { $$ = $3; }

/*****************************************************************************
*
* QUERY:
* create index <indexname> on <relname>
* [ using <access> ] "(" ( <col> [ using <opclass> ] )+ ")"
* [ tablespace <tablespacename> ] [ where <predicate> ]
* QUERY: CREATE INDEX
*
* Note: we can't factor CONCURRENTLY into a separate production without
* making it a reserved word.
*
* Note: we cannot put TABLESPACE clause after WHERE clause unless we are
* willing to make TABLESPACE a fully reserved word.
*****************************************************************************/

IndexStmt: CREATE index_opt_unique INDEX index_name ON qualified_name
access_method_clause '(' index_params ')' opt_definition OptTableSpace where_clause
IndexStmt: CREATE index_opt_unique INDEX index_name
ON qualified_name access_method_clause '(' index_params ')'
opt_definition OptTableSpace where_clause
{
IndexStmt *n = makeNode(IndexStmt);
n->unique = $2;
n->concurrent = false;
n->idxname = $4;
n->relation = $6;
n->accessMethod = $7;
@@ -3661,6 +3664,22 @@ IndexStmt: CREATE index_opt_unique INDEX index_name ON qualified_name
n->whereClause = $13;
$$ = (Node *)n;
}
| CREATE index_opt_unique INDEX CONCURRENTLY index_name
ON qualified_name access_method_clause '(' index_params ')'
opt_definition OptTableSpace where_clause
{
IndexStmt *n = makeNode(IndexStmt);
n->unique = $2;
n->concurrent = true;
n->idxname = $5;
n->relation = $7;
n->accessMethod = $8;
n->indexParams = $10;
n->options = $12;
n->tableSpace = $13;
n->whereClause = $14;
$$ = (Node *)n;
}
;

index_opt_unique:
@@ -8491,6 +8510,7 @@ unreserved_keyword:
| COMMENT
| COMMIT
| COMMITTED
| CONCURRENTLY
| CONNECTION
| CONSTRAINTS
| CONVERSION_P

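Because CONCURRENTLY is added only as an unreserved keyword, the grammar gains the new form without stealing the word from users; a hypothetical illustration of both points (names are made up):

    CREATE UNIQUE INDEX CONCURRENTLY orders_id_uidx ON orders (id);
    CREATE TABLE t (concurrently int);   -- still legal, since the keyword stays unreserved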
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/parser/keywords.c,v 1.175 2006/08/12 02:52:05 tgl Exp $
* $PostgreSQL: pgsql/src/backend/parser/keywords.c,v 1.176 2006/08/25 04:06:52 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -84,6 +84,7 @@ static const ScanKeyword ScanKeywords[] = {
{"comment", COMMENT},
{"commit", COMMIT},
{"committed", COMMITTED},
{"concurrently", CONCURRENTLY},
{"connection", CONNECTION},
{"constraint", CONSTRAINT},
{"constraints", CONSTRAINTS},

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/page/itemptr.c,v 1.17 2006/07/14 14:52:23 momjian Exp $
* $PostgreSQL: pgsql/src/backend/storage/page/itemptr.c,v 1.18 2006/08/25 04:06:53 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -16,13 +16,14 @@

#include "storage/itemptr.h"


/*
* ItemPointerEquals
* Returns true if both item pointers point to the same item,
* otherwise returns false.
*
* Note:
* Assumes that the disk item pointers are not NULL.
* Asserts that the disk item pointers are both valid!
*/
bool
ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
@@ -35,3 +36,30 @@ ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
else
return false;
}

/*
* ItemPointerCompare
* Generic btree-style comparison for item pointers.
*/
int32
ItemPointerCompare(ItemPointer arg1, ItemPointer arg2)
{
/*
* Don't use ItemPointerGetBlockNumber or ItemPointerGetOffsetNumber here,
* because they assert ip_posid != 0 which might not be true for a
* user-supplied TID.
*/
BlockNumber b1 = BlockIdGetBlockNumber(&(arg1->ip_blkid));
BlockNumber b2 = BlockIdGetBlockNumber(&(arg2->ip_blkid));

if (b1 < b2)
return -1;
else if (b1 > b2)
return 1;
else if (arg1->ip_posid < arg2->ip_posid)
return -1;
else if (arg1->ip_posid > arg2->ip_posid)
return 1;
else
return 0;
}

@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/tcop/utility.c,v 1.266 2006/08/15 18:26:58 tgl Exp $
* $PostgreSQL: pgsql/src/backend/tcop/utility.c,v 1.267 2006/08/25 04:06:53 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -781,6 +781,9 @@ ProcessUtility(Node *parsetree,
{
IndexStmt *stmt = (IndexStmt *) parsetree;

if (stmt->concurrent)
PreventTransactionChain(stmt, "CREATE INDEX CONCURRENTLY");

CheckRelationOwnership(stmt->relation, true);

DefineIndex(stmt->relation, /* relation */
@@ -795,10 +798,11 @@ ProcessUtility(Node *parsetree,
stmt->unique,
stmt->primary,
stmt->isconstraint,
false, /* is_alter_table */
true, /* check_rights */
false, /* skip_build */
false); /* quiet */
false, /* is_alter_table */
true, /* check_rights */
false, /* skip_build */
false, /* quiet */
stmt->concurrent); /* concurrent */
}
break;

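The PreventTransactionChain() call above rejects the concurrent form inside an explicit transaction block, since the implementation must commit partway through the build; a hypothetical session:

    BEGIN;
    CREATE INDEX CONCURRENTLY orders_customer_idx ON orders (customer_id);
    -- rejected here: the concurrent form cannot run inside a transaction block
    COMMIT;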
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/utils/adt/tid.c,v 1.54 2006/07/21 20:51:32 tgl Exp $
* $PostgreSQL: pgsql/src/backend/utils/adt/tid.c,v 1.55 2006/08/25 04:06:53 tgl Exp $
*
* NOTES
* input routine largely stolen from boxin().
@@ -158,36 +158,13 @@ tidsend(PG_FUNCTION_ARGS)
* PUBLIC ROUTINES *
*****************************************************************************/

static int32
tid_cmp_internal(ItemPointer arg1, ItemPointer arg2)
{
/*
* Don't use ItemPointerGetBlockNumber or ItemPointerGetOffsetNumber here,
* because they assert ip_posid != 0 which might not be true for a
* user-supplied TID.
*/
BlockNumber b1 = BlockIdGetBlockNumber(&(arg1->ip_blkid));
BlockNumber b2 = BlockIdGetBlockNumber(&(arg2->ip_blkid));

if (b1 < b2)
return -1;
else if (b1 > b2)
return 1;
else if (arg1->ip_posid < arg2->ip_posid)
return -1;
else if (arg1->ip_posid > arg2->ip_posid)
return 1;
else
return 0;
}

Datum
tideq(PG_FUNCTION_ARGS)
{
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);

PG_RETURN_BOOL(tid_cmp_internal(arg1,arg2) == 0);
PG_RETURN_BOOL(ItemPointerCompare(arg1,arg2) == 0);
}

Datum
@@ -196,7 +173,7 @@ tidne(PG_FUNCTION_ARGS)
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);

PG_RETURN_BOOL(tid_cmp_internal(arg1,arg2) != 0);
PG_RETURN_BOOL(ItemPointerCompare(arg1,arg2) != 0);
}

Datum
@@ -205,7 +182,7 @@ tidlt(PG_FUNCTION_ARGS)
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);

PG_RETURN_BOOL(tid_cmp_internal(arg1,arg2) < 0);
PG_RETURN_BOOL(ItemPointerCompare(arg1,arg2) < 0);
}

Datum
@@ -214,7 +191,7 @@ tidle(PG_FUNCTION_ARGS)
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);

PG_RETURN_BOOL(tid_cmp_internal(arg1,arg2) <= 0);
PG_RETURN_BOOL(ItemPointerCompare(arg1,arg2) <= 0);
}

Datum
@@ -223,7 +200,7 @@ tidgt(PG_FUNCTION_ARGS)
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);

PG_RETURN_BOOL(tid_cmp_internal(arg1,arg2) > 0);
PG_RETURN_BOOL(ItemPointerCompare(arg1,arg2) > 0);
}

Datum
@@ -232,7 +209,7 @@ tidge(PG_FUNCTION_ARGS)
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);

PG_RETURN_BOOL(tid_cmp_internal(arg1,arg2) >= 0);
PG_RETURN_BOOL(ItemPointerCompare(arg1,arg2) >= 0);
}

Datum
@@ -241,7 +218,7 @@ bttidcmp(PG_FUNCTION_ARGS)
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);

PG_RETURN_INT32(tid_cmp_internal(arg1, arg2));
PG_RETURN_INT32(ItemPointerCompare(arg1, arg2));
}

Datum
@@ -250,7 +227,7 @@ tidlarger(PG_FUNCTION_ARGS)
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);

PG_RETURN_ITEMPOINTER(tid_cmp_internal(arg1,arg2) >= 0 ? arg1 : arg2);
PG_RETURN_ITEMPOINTER(ItemPointerCompare(arg1,arg2) >= 0 ? arg1 : arg2);
}

Datum
@@ -259,7 +236,7 @@ tidsmaller(PG_FUNCTION_ARGS)
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);

PG_RETURN_ITEMPOINTER(tid_cmp_internal(arg1,arg2) <= 0 ? arg1 : arg2);
PG_RETURN_ITEMPOINTER(ItemPointerCompare(arg1,arg2) <= 0 ? arg1 : arg2);
}