diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index af101689282..1fbf52f1105 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -5638,6 +5638,7 @@ heap_inplace_update(Relation relation, HeapTuple tuple)
  */
 static TransactionId
 FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
+				  TransactionId relfrozenxid, TransactionId relminmxid,
 				  TransactionId cutoff_xid, MultiXactId cutoff_multi,
 				  uint16 *flags)
 {
@@ -5664,15 +5665,25 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
 		*flags |= FRM_INVALIDATE_XMAX;
 		return InvalidTransactionId;
 	}
+	else if (MultiXactIdPrecedes(multi, relminmxid))
+		ereport(ERROR,
+				(errcode(ERRCODE_DATA_CORRUPTED),
+				 errmsg_internal("found multixact %u from before relminmxid %u",
+								 multi, relminmxid)));
 	else if (MultiXactIdPrecedes(multi, cutoff_multi))
 	{
 		/*
-		 * This old multi cannot possibly have members still running.  If it
-		 * was a locker only, it can be removed without any further
-		 * consideration; but if it contained an update, we might need to
-		 * preserve it.
+		 * This old multi cannot possibly have members still running, but
+		 * verify just in case.  If it was a locker only, it can be removed
+		 * without any further consideration; but if it contained an update, we
+		 * might need to preserve it.
 		 */
-		Assert(!MultiXactIdIsRunning(multi));
+		if (MultiXactIdIsRunning(multi))
+			ereport(ERROR,
+					(errcode(ERRCODE_DATA_CORRUPTED),
+					 errmsg_internal("multixact %u from before cutoff %u found to be still running",
+									 multi, cutoff_multi)));
+
 		if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
 		{
 			*flags |= FRM_INVALIDATE_XMAX;
@@ -5686,13 +5697,22 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
 			/* wasn't only a lock, xid needs to be valid */
 			Assert(TransactionIdIsValid(xid));
 
+			if (TransactionIdPrecedes(xid, relfrozenxid))
+				ereport(ERROR,
+						(errcode(ERRCODE_DATA_CORRUPTED),
+						 errmsg_internal("found update xid %u from before relfrozenxid %u",
+										 xid, relfrozenxid)));
+
 			/*
 			 * If the xid is older than the cutoff, it has to have aborted,
 			 * otherwise the tuple would have gotten pruned away.
 			 */
 			if (TransactionIdPrecedes(xid, cutoff_xid))
 			{
-				Assert(!TransactionIdDidCommit(xid));
+				if (TransactionIdDidCommit(xid))
+					ereport(ERROR,
+							(errcode(ERRCODE_DATA_CORRUPTED),
+							 errmsg_internal("cannot freeze committed update xid %u", xid)));
 				*flags |= FRM_INVALIDATE_XMAX;
 				xid = InvalidTransactionId; /* not strictly necessary */
 			}
@@ -5763,6 +5783,13 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
 		{
 			TransactionId xid = members[i].xid;
 
+			Assert(TransactionIdIsValid(xid));
+			if (TransactionIdPrecedes(xid, relfrozenxid))
+				ereport(ERROR,
+						(errcode(ERRCODE_DATA_CORRUPTED),
+						 errmsg_internal("found update xid %u from before relfrozenxid %u",
+										 xid, relfrozenxid)));
+
 			/*
 			 * It's an update; should we keep it?  If the transaction is known
 			 * aborted then it's okay to ignore it, otherwise not.  However,
@@ -5796,6 +5823,26 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
 				Assert(!TransactionIdIsValid(update_xid));
 				update_xid = xid;
 			}
+			else
+			{
+				/*
+				 * Not in progress, not committed -- must be aborted or crashed;
+				 * we can ignore it.
+				 */
+			}
+
+			/*
+			 * Since the tuple wasn't marked HEAPTUPLE_DEAD by vacuum, the
+			 * update Xid cannot possibly be older than the xid cutoff.  The
+			 * presence of such a tuple would cause corruption, so be paranoid
+			 * and check.
+			 */
+			if (TransactionIdIsValid(update_xid) &&
+				TransactionIdPrecedes(update_xid, cutoff_xid))
+				ereport(ERROR,
+						(errcode(ERRCODE_DATA_CORRUPTED),
+						 errmsg_internal("found update xid %u from before xid cutoff %u",
+										 update_xid, cutoff_xid)));
 
 			/*
 			 * If we determined that it's an Xid corresponding to an update
@@ -5907,8 +5954,9 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
  * recovery.  We really need to remove old xids.
  */
 bool
-heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
-						  TransactionId cutoff_multi,
+heap_prepare_freeze_tuple(HeapTupleHeader tuple,
+						  TransactionId relfrozenxid, TransactionId relminmxid,
+						  TransactionId cutoff_xid, TransactionId cutoff_multi,
 						  xl_heap_freeze_tuple *frz)
 
 {
@@ -5923,11 +5971,25 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
 
 	/* Process xmin */
 	xid = HeapTupleHeaderGetXmin(tuple);
-	if (TransactionIdIsNormal(xid) &&
-		TransactionIdPrecedes(xid, cutoff_xid))
+	if (TransactionIdIsNormal(xid))
 	{
-		frz->t_infomask |= HEAP_XMIN_FROZEN;
-		changed = true;
+		if (TransactionIdPrecedes(xid, relfrozenxid))
+			ereport(ERROR,
+					(errcode(ERRCODE_DATA_CORRUPTED),
+					 errmsg_internal("found xmin %u from before relfrozenxid %u",
+									 xid, relfrozenxid)));
+
+		if (TransactionIdPrecedes(xid, cutoff_xid))
+		{
+			if (!TransactionIdDidCommit(xid))
+				ereport(ERROR,
+						(errcode(ERRCODE_DATA_CORRUPTED),
+						 errmsg_internal("uncommitted xmin %u from before xid cutoff %u needs to be frozen",
+										 xid, cutoff_xid)));
+
+			frz->t_infomask |= HEAP_XMIN_FROZEN;
+			changed = true;
+		}
 	}
 
 	/*
@@ -5947,6 +6009,7 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
 		uint16		flags;
 
 		newxmax = FreezeMultiXactId(xid, tuple->t_infomask,
+									relfrozenxid, relminmxid,
 									cutoff_xid, cutoff_multi, &flags);
 
 		if (flags & FRM_INVALIDATE_XMAX)
@@ -5992,10 +6055,30 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
 			Assert(flags & FRM_NOOP);
 		}
 	}
-	else if (TransactionIdIsNormal(xid) &&
-			 TransactionIdPrecedes(xid, cutoff_xid))
+	else if (TransactionIdIsNormal(xid))
 	{
-		freeze_xmax = true;
+		if (TransactionIdPrecedes(xid, relfrozenxid))
+			ereport(ERROR,
+					(errcode(ERRCODE_DATA_CORRUPTED),
+					 errmsg_internal("found xmax %u from before relfrozenxid %u",
+									 xid, relfrozenxid)));
+
+		if (TransactionIdPrecedes(xid, cutoff_xid))
+		{
+			/*
+			 * If we freeze xmax, make absolutely sure that it's not an XID
+			 * that is important.  (Note, a lock-only xmax can be removed
+			 * independent of committedness, since a committed lock holder has
+			 * released the lock).
+			 */
+			if (!(tuple->t_infomask & HEAP_XMAX_LOCK_ONLY) &&
+				TransactionIdDidCommit(xid))
+				ereport(ERROR,
+						(errcode(ERRCODE_DATA_CORRUPTED),
+						 errmsg_internal("cannot freeze committed xmax %u",
+										 xid)));
+			freeze_xmax = true;
+		}
 	}
 
 	if (freeze_xmax)
@@ -6089,13 +6172,16 @@ heap_execute_freeze_tuple(HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
  * Useful for callers like CLUSTER that perform their own WAL logging.
  */
 bool
-heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
-				  TransactionId cutoff_multi)
+heap_freeze_tuple(HeapTupleHeader tuple,
+				  TransactionId relfrozenxid, TransactionId relminmxid,
+				  TransactionId cutoff_xid, TransactionId cutoff_multi)
 {
 	xl_heap_freeze_tuple frz;
 	bool		do_freeze;
 
-	do_freeze = heap_prepare_freeze_tuple(tuple, cutoff_xid, cutoff_multi,
+	do_freeze = heap_prepare_freeze_tuple(tuple,
+										  relfrozenxid, relminmxid,
+										  cutoff_xid, cutoff_multi,
 										  &frz);
 
 	/*
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index 07f08a4d89f..c37a6679e12 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -407,7 +407,10 @@ rewrite_heap_tuple(RewriteState state,
 	 * While we have our hands on the tuple, we may as well freeze any
 	 * eligible xmin or xmax, so that future VACUUM effort can be saved.
 	 */
-	heap_freeze_tuple(new_tuple->t_data, state->rs_freeze_xid,
+	heap_freeze_tuple(new_tuple->t_data,
+					  state->rs_old_rel->rd_rel->relfrozenxid,
+					  state->rs_old_rel->rd_rel->relminmxid,
+					  state->rs_freeze_xid,
 					  state->rs_cutoff_multi);
 
 	/*
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 95f5952f63f..f96f47f56c2 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -429,6 +429,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 				blkno;
 	HeapTupleData tuple;
 	char	   *relname;
+	TransactionId relfrozenxid = onerel->rd_rel->relfrozenxid;
+	TransactionId relminmxid = onerel->rd_rel->relminmxid;
 	BlockNumber empty_pages,
 				vacuumed_pages;
 	double		num_tuples,
@@ -821,6 +823,13 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 					 * tuple, we choose to keep it, because it'll be a lot
 					 * cheaper to get rid of it in the next pruning pass than
 					 * to treat it like an indexed tuple.
+					 *
+					 * If this were to happen for a tuple that actually needed
+					 * to be deleted, we'd be in trouble, because it'd
+					 * possibly leave a tuple below the relation's xmin
+					 * horizon alive.  heap_prepare_freeze_tuple() is prepared
+					 * to detect that case and abort the transaction,
+					 * preventing corruption.
 					 */
 					if (HeapTupleIsHotUpdated(&tuple) ||
 						HeapTupleIsHeapOnly(&tuple))
@@ -910,8 +919,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 			 * Each non-removable tuple must be checked to see if it needs
 			 * freezing.  Note we already have exclusive buffer lock.
 			 */
-			if (heap_prepare_freeze_tuple(tuple.t_data, FreezeLimit,
-										  MultiXactCutoff, &frozen[nfrozen]))
+			if (heap_prepare_freeze_tuple(tuple.t_data,
+										  relfrozenxid, relminmxid,
+										  FreezeLimit, MultiXactCutoff,
+										  &frozen[nfrozen]))
 				frozen[nfrozen++].offset = offnum;
 		}
 	}							/* scan along page */
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 493839f60e9..c478ccc1d69 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -148,8 +148,9 @@ extern HTSU_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
 			bool follow_update,
 			Buffer *buffer, HeapUpdateFailureData *hufd);
 extern void heap_inplace_update(Relation relation, HeapTuple tuple);
-extern bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
-				  TransactionId cutoff_multi);
+extern bool heap_freeze_tuple(HeapTupleHeader tuple,
+				  TransactionId relfrozenxid, TransactionId relminmxid,
+				  TransactionId cutoff_xid, TransactionId cutoff_multi);
 extern bool heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
 						MultiXactId cutoff_multi, Buffer buf);
 
diff --git a/src/include/access/heapam_xlog.h b/src/include/access/heapam_xlog.h
index 9557486635a..13af62911e0 100644
--- a/src/include/access/heapam_xlog.h
+++ b/src/include/access/heapam_xlog.h
@@ -386,6 +386,8 @@ extern XLogRecPtr log_heap_freeze(Relation reln, Buffer buffer,
 					TransactionId cutoff_xid, xl_heap_freeze_tuple *tuples,
 					int ntuples);
 extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple,
+									  TransactionId relfrozenxid,
+									  TransactionId relminmxid,
 									  TransactionId cutoff_xid,
 									  TransactionId cutoff_multi,
 									  xl_heap_freeze_tuple *frz);
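
Not part of the patch: a minimal sketch of how a backend-internal caller would invoke the revised heap_freeze_tuple() API after this change, modeled on the rewriteheap.c hunk above. The helper function, its name, and its arguments are hypothetical; only heap_freeze_tuple() and the rd_rel fields come from the patch.

#include "postgres.h"
#include "access/heapam.h"
#include "utils/rel.h"

/*
 * Hypothetical helper, for illustration only: freeze one tuple of "rel",
 * passing the relation's own relfrozenxid/relminmxid so that the new
 * sanity checks can raise ERRCODE_DATA_CORRUPTED instead of silently
 * freezing bogus XIDs.  cutoff_xid/cutoff_multi are assumed to have been
 * computed by the caller (e.g. via vacuum_set_xid_limits()).
 */
static void
freeze_one_tuple(Relation rel, HeapTupleHeader tup,
				 TransactionId cutoff_xid, TransactionId cutoff_multi)
{
	/* Return value (whether the tuple changed) is ignored in this sketch. */
	(void) heap_freeze_tuple(tup,
							 rel->rd_rel->relfrozenxid,
							 rel->rd_rel->relminmxid,
							 cutoff_xid, cutoff_multi);
}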