1
0
mirror of https://github.com/postgres/postgres.git synced 2025-07-23 03:21:12 +03:00

'heap' logging

This commit is contained in:
Vadim B. Mikheev
2000-07-03 02:54:21 +00:00
parent 80c646958a
commit 1b67fe17b8
7 changed files with 132 additions and 132 deletions

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.74 2000/07/02 22:00:27 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.75 2000/07/03 02:54:15 vadim Exp $
*
*
* INTERFACE ROUTINES
@ -1271,10 +1271,9 @@ heap_get_latest_tid(Relation relation,
Oid
heap_insert(Relation relation, HeapTuple tup)
{
/* ----------------
* increment access statistics
* ----------------
*/
Buffer buffer;
/* increment access statistics */
tup->tableOid = relation->rd_id;
IncrHeapAccessStat(local_insert);
IncrHeapAccessStat(global_insert);
@ -1300,7 +1299,11 @@ heap_insert(Relation relation, HeapTuple tup)
tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
RelationPutHeapTupleAtEnd(relation, tup);
/* Find buffer for this tuple */
buffer = RelationGetBufferForTuple(relation, tup->t_len, InvalidBuffer);
/* NO ELOG(ERROR) from here till changes are logged */
RelationPutHeapTuple(relation, buffer, tup);
#ifdef XLOG
/* XLOG stuff */
@ -1308,7 +1311,8 @@ heap_insert(Relation relation, HeapTuple tup)
xl_heap_insert xlrec;
xlrec.itid.dbId = relation->rd_lockInfo.lockRelId.dbId;
xlrec.itid.relId = relation->rd_lockInfo.lockRelId.relId;
XXX xlrec.itid.tid = tp.t_self;
xlrec.itid.cid = GetCurrentCommandId();
xlrec.itid.tid = tup->t_self;
xlrec.t_natts = tup->t_data->t_natts;
xlrec.t_oid = tup->t_data->t_oid;
xlrec.t_hoff = tup->t_data->t_hoff;
@ -1319,10 +1323,14 @@ XXX xlrec.itid.tid = tp.t_self;
(char*) tup->t_data + offsetof(HeapTupleHeaderData, tbits),
tup->t_len - offsetof(HeapTupleHeaderData, tbits));
dp->pd_lsn = recptr;
((PageHeader) BufferGetPage(buffer))->pd_lsn = recptr;
((PageHeader) BufferGetPage(buffer))->pd_sui = ThisStartUpID;
}
#endif
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
WriteBuffer(buffer);
if (IsSystemRelationName(RelationGetRelationName(relation)))
RelationMark4RollbackHeapTuple(relation, tup);
@ -1417,11 +1425,13 @@ l1:
xl_heap_delete xlrec;
xlrec.dtid.dbId = relation->rd_lockInfo.lockRelId.dbId;
xlrec.dtid.relId = relation->rd_lockInfo.lockRelId.relId;
xlrec.dtid.cid = GetCurrentCommandId();
xlrec.dtid.tid = tp.t_self;
XLogRecPtr recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE,
(char*) xlrec, sizeof(xlrec), NULL, 0);
dp->pd_lsn = recptr;
dp->pd_sui = ThisStartUpID;
}
#endif
@ -1451,7 +1461,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
ItemId lp;
HeapTupleData oldtup;
PageHeader dp;
Buffer buffer;
Buffer buffer, newbuf;
int result;
newtup->tableOid = relation->rd_id;
@ -1531,43 +1541,65 @@ l2:
newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
newtup->t_data->t_infomask |= (HEAP_XMAX_INVALID | HEAP_UPDATED);
/* logically delete old item */
/* Find buffer for new tuple */
if ((unsigned) MAXALIGN(newtup->t_len) <= PageGetFreeSpace((Page) dp))
newbuf = buffer;
else
newbuf = RelationGetBufferForTuple(relation, newtup->t_len, buffer);
/* NO ELOG(ERROR) from here till changes are logged */
/* insert new tuple */
RelationPutHeapTuple(relation, newbuf, newtup);
/* logically delete old tuple */
TransactionIdStore(GetCurrentTransactionId(), &(oldtup.t_data->t_xmax));
oldtup.t_data->t_cmax = GetCurrentCommandId();
oldtup.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
/* insert new item */
if ((unsigned) MAXALIGN(newtup->t_len) <= PageGetFreeSpace((Page) dp))
RelationPutHeapTuple(relation, buffer, newtup);
else
{
/*
* New item won't fit on same page as old item, have to look for a
* new place to put it. Note that we have to unlock current buffer
* context - not good but RelationPutHeapTupleAtEnd uses extend
* lock.
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
RelationPutHeapTupleAtEnd(relation, newtup);
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
}
/* mark for rollback caches */
RelationMark4RollbackHeapTuple(relation, newtup);
/*
* New item in place, now record address of new tuple in t_ctid of old
* one.
*/
/* record address of new tuple in t_ctid of old one */
oldtup.t_data->t_ctid = newtup->t_self;
#ifdef XLOG
/* XLOG stuff */
{
xl_heap_update xlrec;
xlrec.dtid.dbId = relation->rd_lockInfo.lockRelId.dbId;
xlrec.dtid.relId = relation->rd_lockInfo.lockRelId.relId;
xlrec.dtid.cid = GetCurrentCommandId();
xlrec.itid.tid = newtup->t_self;
xlrec.t_natts = newtup->t_data->t_natts;
xlrec.t_hoff = newtup->t_data->t_hoff;
xlrec.mask = newtup->t_data->t_infomask;
XLogRecPtr recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_UPDATE,
(char*) xlrec, sizeof(xlrec),
(char*) newtup->t_data + offsetof(HeapTupleHeaderData, tbits),
newtup->t_len - offsetof(HeapTupleHeaderData, tbits));
if (newbuf != buffer)
{
((PageHeader) BufferGetPage(newbuf))->pd_lsn = recptr;
((PageHeader) BufferGetPage(newbuf))->pd_sui = ThisStartUpID;
}
((PageHeader) BufferGetPage(buffer))->pd_lsn = recptr;
((PageHeader) BufferGetPage(buffer))->pd_sui = ThisStartUpID;
}
#endif
if (newbuf != buffer)
{
LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
WriteBuffer(newbuf);
}
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
WriteBuffer(buffer);
/* invalidate caches */
RelationInvalidateHeapTuple(relation, &oldtup);
WriteBuffer(buffer);
RelationMark4RollbackHeapTuple(relation, newtup);
return HeapTupleMayBeUpdated;
}
@ -1648,6 +1680,14 @@ l3:
return result;
}
#ifdef XLOG
/*
* XLOG stuff: no logging is required as long as we have no
* savepoints. For savepoints private log could be used...
*/
((PageHeader) BufferGetPage(*buffer))->pd_sui = ThisStartUpID;
#endif
/* store transaction information of xact marking the tuple */
TransactionIdStore(GetCurrentTransactionId(), &(tuple->t_data->t_xmax));
tuple->t_data->t_cmax = GetCurrentCommandId();

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Id: hio.c,v 1.31 2000/04/12 17:14:45 momjian Exp $
* $Id: hio.c,v 1.32 2000/07/03 02:54:15 vadim Exp $
*
*-------------------------------------------------------------------------
*/
@ -19,17 +19,11 @@
#include "access/hio.h"
/*
* amputunique - place tuple at tid
* Currently on errors, calls elog. Perhaps should return -1?
* Possible errors include the addition of a tuple to the page
* between the time the linep is chosen and the page is L_UP'd.
* RelationPutHeapTuple - place tuple at specified page
*
* This should be coordinated with the B-tree code.
* Probably needs to have an amdelunique to allow for
* internal index records to be deleted and reordered as needed.
* For the heap AM, this should never be needed.
* !!! ELOG(ERROR) IS DISALLOWED HERE !!!
*
* Note - we assume that caller hold BUFFER_LOCK_EXCLUSIVE on the buffer.
* Note - we assume that caller hold BUFFER_LOCK_EXCLUSIVE on the buffer.
*
*/
void
@ -57,62 +51,41 @@ RelationPutHeapTuple(Relation relation,
offnum = PageAddItem((Page) pageHeader, (Item) tuple->t_data,
tuple->t_len, InvalidOffsetNumber, LP_USED);
if (offnum == InvalidOffsetNumber)
elog(STOP, "RelationPutHeapTuple: failed to add tuple");
itemId = PageGetItemId((Page) pageHeader, offnum);
item = PageGetItem((Page) pageHeader, itemId);
ItemPointerSet(&((HeapTupleHeader) item)->t_ctid,
BufferGetBlockNumber(buffer), offnum);
/*
* Let the caller do this!
*
* WriteBuffer(buffer);
*/
/* return an accurate tuple */
ItemPointerSet(&tuple->t_self, BufferGetBlockNumber(buffer), offnum);
}
/*
* This routine is another in the series of attempts to reduce the number
* of I/O's and system calls executed in the various benchmarks. In
* particular, this routine is used to append data to the end of a relation
* file without excessive lseeks. This code should do no more than 2 semops
* in the ideal case.
* RelationGetBufferForTuple
*
* Eventually, we should cache the number of blocks in a relation somewhere.
* Until that time, this code will have to do an lseek to determine the number
* of blocks in a relation.
* Returns (locked) buffer to add tuple with given len.
* If Ubuf is valid then no attempt to lock it should be made -
* this is for heap_update...
*
* This code should ideally do at most 4 semops, 1 lseek, and possibly 1 write
* to do an append; it's possible to eliminate 2 of the semops if we do direct
* buffer stuff (!); the lseek and the write can go if we get
* RelationGetNumberOfBlocks to be useful.
* ELOG(ERROR) is allowed here, so this routine *must* be called
* before any (unlogged) changes are made in buffer pool.
*
* NOTE: This code presumes that we have a write lock on the relation.
* Not now - we use extend locking...
*
* Also note that this routine probably shouldn't have to exist, and does
* screw up the call graph rather badly, but we are wasting so much time and
* system resources being massively general that we are losing badly in our
* performance benchmarks.
*/
void
RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
Buffer
RelationGetBufferForTuple(Relation relation, Size len, Buffer Ubuf)
{
Buffer buffer;
Page pageHeader;
BlockNumber lastblock;
OffsetNumber offnum;
Size len;
ItemId itemId;
Item item;
len = MAXALIGN(tuple->t_len); /* be conservative */
len = MAXALIGN(len); /* be conservative */
/*
* If we're gonna fail for oversize tuple, do it right away... this
* code should go away eventually.
* If we're gonna fail for oversize tuple, do it right away
*/
if (len > MaxTupleSize)
elog(ERROR, "Tuple is too big: size %u, max size %ld",
@ -152,7 +125,8 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
else
buffer = ReadBuffer(relation, lastblock - 1);
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
if (buffer != Ubuf)
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
pageHeader = (Page) BufferGetPage(buffer);
/*
@ -160,7 +134,8 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
*/
if (len > PageGetFreeSpace(pageHeader))
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
if (buffer != Ubuf)
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
buffer = ReleaseAndReadBuffer(buffer, relation, P_NEW);
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
pageHeader = (Page) BufferGetPage(buffer);
@ -168,36 +143,22 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
if (len > PageGetFreeSpace(pageHeader))
{
/*
* BUG: by elog'ing here, we leave the new buffer locked and
* not marked dirty, which may result in an invalid page
* header being left on disk. But we should not get here
* given the test at the top of the routine, and the whole
* deal should go away when we implement tuple splitting
* anyway...
*/
elog(ERROR, "Tuple is too big: size %u", len);
/* We should not get here given the test at the top */
elog(STOP, "Tuple is too big: size %u", len);
}
}
/*
* Caller should check space in Ubuf but...
*/
else if (buffer == Ubuf)
{
ReleaseBuffer(buffer);
buffer = Ubuf;
}
if (!relation->rd_myxactonly)
UnlockPage(relation, 0, ExclusiveLock);
offnum = PageAddItem((Page) pageHeader, (Item) tuple->t_data,
tuple->t_len, InvalidOffsetNumber, LP_USED);
itemId = PageGetItemId((Page) pageHeader, offnum);
item = PageGetItem((Page) pageHeader, itemId);
lastblock = BufferGetBlockNumber(buffer);
ItemPointerSet(&((HeapTupleHeader) item)->t_ctid, lastblock, offnum);
/* return an accurate tuple self-pointer */
ItemPointerSet(&tuple->t_self, lastblock, offnum);
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
WriteBuffer(buffer);
return(buffer);
}