
Support unlogged tables.

The contents of an unlogged table are not WAL-logged; thus, they are not
available on standby servers and are truncated whenever the database
system enters recovery.  Indexes on unlogged tables are also unlogged.
Unlogged GiST indexes are not currently supported.
commit 53dbc27c62
parent 9b8aff8c19
Author: Robert Haas
Date:   2010-12-29 06:48:53 -05:00

49 changed files with 916 additions and 104 deletions
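For context, unlogged relations are identified by a persistence flag on the relation. A minimal hypothetical sketch of the kind of test storage-level code can make (the helper name is illustrative and not part of this commit; RELPERSISTENCE_UNLOGGED is the pg_class persistence value for unlogged relations):

#include "postgres.h"
#include "catalog/pg_class.h"	/* RELPERSISTENCE_UNLOGGED */
#include "utils/rel.h"			/* Relation */

/*
 * Illustrative helper, not part of this commit: an unlogged relation's
 * data skips WAL, so after a crash its main fork is reset from the empty
 * initialization fork written at CREATE time.
 */
static bool
relation_is_unlogged(Relation rel)
{
	return rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED;
}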

src/backend/access/hash/hash.c

@@ -69,7 +69,7 @@ hashbuild(PG_FUNCTION_ARGS)
 	estimate_rel_size(heap, NULL, &relpages, &reltuples);
 
 	/* Initialize the hash index metadata page and initial buckets */
-	num_buckets = _hash_metapinit(index, reltuples);
+	num_buckets = _hash_metapinit(index, reltuples, MAIN_FORKNUM);
 
 	/*
 	 * If we just insert the tuples into the index in scan order, then
@@ -113,6 +113,19 @@ hashbuild(PG_FUNCTION_ARGS)
 	PG_RETURN_POINTER(result);
 }
 
+/*
+ *	hashbuildempty() -- build an empty hash index in the initialization fork
+ */
+Datum
+hashbuildempty(PG_FUNCTION_ARGS)
+{
+	Relation	index = (Relation) PG_GETARG_POINTER(0);
+
+	_hash_metapinit(index, 0, INIT_FORKNUM);
+
+	PG_RETURN_VOID();
+}
+
 /*
  *	Per-tuple callback from IndexBuildHeapScan
  */
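hashbuildempty() above is the hash AM's implementation of the new "build an empty index in the initialization fork" entry point; other access methods provide the same shape of function. A minimal sketch for an imaginary AM (every "foo" name is hypothetical):

/*
 * Hypothetical buildempty function for an imaginary "foo" AM, mirroring
 * hashbuildempty() above: write the AM's empty-index skeleton into
 * INIT_FORKNUM rather than MAIN_FORKNUM, so recovery can restore the
 * index to its freshly-built empty state.
 */
Datum
foobuildempty(PG_FUNCTION_ARGS)
{
	Relation	index = (Relation) PG_GETARG_POINTER(0);

	_foo_initpages(index, INIT_FORKNUM);	/* hypothetical initializer */

	PG_RETURN_VOID();
}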

src/backend/access/hash/hashovfl.c

@@ -259,7 +259,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
 		 * convenient to pre-mark them as "in use" too.
 		 */
 		bit = metap->hashm_spares[splitnum];
-		_hash_initbitmap(rel, metap, bitno_to_blkno(metap, bit));
+		_hash_initbitmap(rel, metap, bitno_to_blkno(metap, bit), MAIN_FORKNUM);
 		metap->hashm_spares[splitnum]++;
 	}
 	else
@@ -280,7 +280,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
 	 * with metapage write lock held; would be better to use a lock that
 	 * doesn't block incoming searches.
 	 */
-	newbuf = _hash_getnewbuf(rel, blkno);
+	newbuf = _hash_getnewbuf(rel, blkno, MAIN_FORKNUM);
 
 	metap->hashm_spares[splitnum]++;
@@ -503,7 +503,8 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf,
 *	All bits in the new bitmap page are set to "1", indicating "in use".
 */
 void
-_hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno)
+_hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno,
+				 ForkNumber forkNum)
 {
 	Buffer		buf;
 	Page		pg;
@@ -520,7 +521,7 @@ _hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno)
 	 * page while holding the metapage lock, but this path is taken so seldom
 	 * that it's not worth worrying about.
 	 */
-	buf = _hash_getnewbuf(rel, blkno);
+	buf = _hash_getnewbuf(rel, blkno, forkNum);
 	pg = BufferGetPage(buf);
 
 	/* initialize the page's special space */

src/backend/access/hash/hashpage.c

@@ -183,9 +183,9 @@ _hash_getinitbuf(Relation rel, BlockNumber blkno)
 * extend the index at a time.
 */
 Buffer
-_hash_getnewbuf(Relation rel, BlockNumber blkno)
+_hash_getnewbuf(Relation rel, BlockNumber blkno, ForkNumber forkNum)
 {
-	BlockNumber nblocks = RelationGetNumberOfBlocks(rel);
+	BlockNumber nblocks = RelationGetNumberOfBlocksInFork(rel, forkNum);
 	Buffer		buf;
 
 	if (blkno == P_NEW)
@@ -197,13 +197,13 @@ _hash_getnewbuf(Relation rel, BlockNumber blkno)
 	/* smgr insists we use P_NEW to extend the relation */
 	if (blkno == nblocks)
 	{
-		buf = ReadBuffer(rel, P_NEW);
+		buf = ReadBufferExtended(rel, forkNum, P_NEW, RBM_NORMAL, NULL);
 		if (BufferGetBlockNumber(buf) != blkno)
 			elog(ERROR, "unexpected hash relation size: %u, should be %u",
 				 BufferGetBlockNumber(buf), blkno);
 	}
 	else
-		buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO, NULL);
+		buf = ReadBufferExtended(rel, forkNum, blkno, RBM_ZERO, NULL);
 
 	LockBuffer(buf, HASH_WRITE);
@@ -324,7 +324,7 @@ _hash_chgbufaccess(Relation rel,
 * multiple buffer locks is ignored.
 */
 uint32
-_hash_metapinit(Relation rel, double num_tuples)
+_hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
 {
 	HashMetaPage metap;
 	HashPageOpaque pageopaque;
@@ -340,7 +340,7 @@ _hash_metapinit(Relation rel, double num_tuples)
 	uint32		i;
 
 	/* safety check */
-	if (RelationGetNumberOfBlocks(rel) != 0)
+	if (RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
 		elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
 			 RelationGetRelationName(rel));
@@ -383,7 +383,7 @@ _hash_metapinit(Relation rel, double num_tuples)
 	 * calls to occur.  This ensures that the smgr level has the right idea of
 	 * the physical index length.
 	 */
-	metabuf = _hash_getnewbuf(rel, HASH_METAPAGE);
+	metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
 	pg = BufferGetPage(metabuf);
 
 	pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
@@ -451,7 +451,7 @@ _hash_metapinit(Relation rel, double num_tuples)
 		/* Allow interrupts, in case N is huge */
 		CHECK_FOR_INTERRUPTS();
 
-		buf = _hash_getnewbuf(rel, BUCKET_TO_BLKNO(metap, i));
+		buf = _hash_getnewbuf(rel, BUCKET_TO_BLKNO(metap, i), forkNum);
 		pg = BufferGetPage(buf);
 		pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
 		pageopaque->hasho_prevblkno = InvalidBlockNumber;
@@ -468,7 +468,7 @@ _hash_metapinit(Relation rel, double num_tuples)
 	/*
 	 * Initialize first bitmap page
 	 */
-	_hash_initbitmap(rel, metap, num_buckets + 1);
+	_hash_initbitmap(rel, metap, num_buckets + 1, forkNum);
 
 	/* all done */
 	_hash_wrtbuf(rel, metabuf);
@@ -785,7 +785,7 @@ _hash_splitbucket(Relation rel,
 	oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
 
 	nblkno = start_nblkno;
-	nbuf = _hash_getnewbuf(rel, nblkno);
+	nbuf = _hash_getnewbuf(rel, nblkno, MAIN_FORKNUM);
 	npage = BufferGetPage(nbuf);
 
 	/* initialize the new bucket's primary page */
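Taken together, the hashpage.c hunks thread a ForkNumber through every page-allocation path so the same build code can target either the main fork or the initialization fork. A minimal sketch of the underlying buffer-manager pattern (the helper is illustrative; ReadBufferExtended and LockBuffer are the real calls used above):

/*
 * Illustrative helper, not from this commit: extend the given fork of a
 * relation by one page and return the new page exclusively locked.
 * P_NEW asks the buffer manager to allocate the next block of that fork.
 */
static Buffer
extend_fork_by_one_page(Relation rel, ForkNumber forkNum)
{
	Buffer		buf;

	buf = ReadBufferExtended(rel, forkNum, P_NEW, RBM_NORMAL, NULL);
	LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
	return buf;
}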