
Fix data loss in logical replication.

Data loss can happen when DDLs like ALTER PUBLICATION ... ADD TABLE ...
or ALTER TYPE ... that don't take a strong lock on the table run
concurrently with DMLs on the tables involved in the DDL. This happens
because logical decoding doesn't distribute invalidations to concurrent
transactions, so those transactions use stale cache data to decode the
changes. The problem gets worse because we keep using the stale cache
even after those in-progress transactions have finished, and thereby skip
changes that should have been sent to the client.
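
For illustration, a minimal sketch of the problematic interleaving,
mirroring the isolation test added by this commit (object names are taken
from that test):

    -- session 1: a transaction touching tbl1 is in progress
    BEGIN;
    INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
    -- session 2: add the table to the publication; no strong lock is taken
    ALTER PUBLICATION pub ADD TABLE tbl1;
    -- session 1: commit, then insert again
    COMMIT;
    INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
    -- without the fix, decoding can keep using the stale publication data
    -- and silently skip this last insert instead of sending it downstream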

This commit fixes the issue by distributing invalidation messages from
catalog-modifying transactions to all concurrent in-progress transactions.
This allows the necessary rebuild of the catalog cache when decoding new
changes after concurrent DDL.

We observed a performance regression primarily during frequent execution of
*publication DDL* statements that modify the published tables. The
regression is minor or nearly nonexistent for DDLs that do not affect the
published tables or occur infrequently, making this a worthwhile cost to
resolve a longstanding data loss issue.

An alternative approach considered was to take a strong lock on each
affected table during publication modification. However, this would only
address issues related to publication DDLs (but not ALTER TYPE ...) and
would require locking every relation in the database for publications
created FOR ALL TABLES, which is impractical.

The bug exists in all supported branches, but we are back-patching only down
to 14. The fix for 13 requires somewhat bigger changes than this one, so the
fix for that branch is still under discussion.

Reported-by: hubert depesz lubaczewski <depesz@depesz.com>
Reported-by: Tomas Vondra <tomas.vondra@enterprisedb.com>
Author: Shlok Kyal <shlok.kyal.oss@gmail.com>
Author: Hayato Kuroda <kuroda.hayato@fujitsu.com>
Reviewed-by: Zhijie Hou <houzj.fnst@fujitsu.com>
Reviewed-by: Masahiko Sawada <sawada.mshk@gmail.com>
Reviewed-by: Amit Kapila <amit.kapila16@gmail.com>
Tested-by: Benoit Lobréau <benoit.lobreau@dalibo.com>
Backpatch-through: 14
Discussion: https://postgr.es/m/de52b282-1166-1180-45a2-8d8917ca74c6@enterprisedb.com
Discussion: https://postgr.es/m/CAD21AoAenVqiMjpN-PvGHL1N9DWnHSq673bfgr6phmBUzx=kLQ@mail.gmail.com
Amit Kapila 2025-04-10 12:22:30 +05:30
parent 115f45e9af
commit 0434033e8b
6 changed files with 134 additions and 15 deletions


@ -8,7 +8,8 @@ REGRESS = ddl xact rewrite toast permissions decoding_in_xact \
spill slot truncate stream stats twophase twophase_stream
ISOLATION = mxact delayed_startup ondisk_startup concurrent_ddl_dml \
oldest_xmin snapshot_transfer subxact_without_top concurrent_stream \
twophase_snapshot catalog_change_snapshot skip_snapshot_restore
twophase_snapshot catalog_change_snapshot skip_snapshot_restore \
invalidation_distrubution
REGRESS_OPTS = --temp-config $(top_srcdir)/contrib/test_decoding/logical.conf
ISOLATION_OPTS = --temp-config $(top_srcdir)/contrib/test_decoding/logical.conf


@ -0,0 +1,20 @@
Parsed test spec with 2 sessions
starting permutation: s1_insert_tbl1 s1_begin s1_insert_tbl1 s2_alter_pub_add_tbl s1_commit s1_insert_tbl1 s2_get_binary_changes
step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
step s1_begin: BEGIN;
step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
step s2_alter_pub_add_tbl: ALTER PUBLICATION pub ADD TABLE tbl1;
step s1_commit: COMMIT;
step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
step s2_get_binary_changes: SELECT count(data) FROM pg_logical_slot_get_binary_changes('isolation_slot', NULL, NULL, 'proto_version', '2', 'publication_names', 'pub') WHERE get_byte(data, 0) = 73;
count
-----
1
(1 row)
?column?
--------
stop
(1 row)


@ -0,0 +1,32 @@
# Test that catalog cache invalidation messages are distributed to ongoing
# transactions, ensuring they can access the updated catalog content after
# processing these messages.
setup
{
SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'pgoutput');
CREATE TABLE tbl1(val1 integer, val2 integer);
CREATE PUBLICATION pub;
}
teardown
{
DROP TABLE tbl1;
DROP PUBLICATION pub;
SELECT 'stop' FROM pg_drop_replication_slot('isolation_slot');
}
session "s1"
setup { SET synchronous_commit=on; }
step "s1_begin" { BEGIN; }
step "s1_insert_tbl1" { INSERT INTO tbl1 (val1, val2) VALUES (1, 1); }
step "s1_commit" { COMMIT; }
session "s2"
setup { SET synchronous_commit=on; }
step "s2_alter_pub_add_tbl" { ALTER PUBLICATION pub ADD TABLE tbl1; }
step "s2_get_binary_changes" { SELECT count(data) FROM pg_logical_slot_get_binary_changes('isolation_slot', NULL, NULL, 'proto_version', '2', 'publication_names', 'pub') WHERE get_byte(data, 0) = 73; }
# Expect to get one insert change. LOGICAL_REP_MSG_INSERT = 'I'
permutation "s1_insert_tbl1" "s1_begin" "s1_insert_tbl1" "s2_alter_pub_add_tbl" "s1_commit" "s1_insert_tbl1" "s2_get_binary_changes"


@ -5196,3 +5196,26 @@ restart:
*cmax = ent->cmax;
return true;
}
/*
* Count invalidation messages of specified transaction.
*
* Returns number of messages, and msgs is set to the pointer of the linked
* list for the messages.
*/
uint32
ReorderBufferGetInvalidations(ReorderBuffer *rb, TransactionId xid,
SharedInvalidationMessage **msgs)
{
ReorderBufferTXN *txn;
txn = ReorderBufferTXNByXid(rb, xid, false, NULL, InvalidXLogRecPtr,
false);
if (txn == NULL)
return 0;
*msgs = txn->invalidations;
return txn->ninvalidations;
}


@ -290,7 +290,7 @@ static void SnapBuildFreeSnapshot(Snapshot snap);
static void SnapBuildSnapIncRefcount(Snapshot snap);
static void SnapBuildDistributeNewCatalogSnapshot(SnapBuild *builder, XLogRecPtr lsn);
static void SnapBuildDistributeSnapshotAndInval(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid);
/* xlog reading helper functions for SnapBuildProcessRunningXacts */
static bool SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *running);
@ -843,15 +843,15 @@ SnapBuildProcessNewCid(SnapBuild *builder, TransactionId xid,
}
/*
* Add a new Snapshot to all transactions we're decoding that currently are
* in-progress so they can see new catalog contents made by the transaction
* that just committed. This is necessary because those in-progress
* transactions will use the new catalog's contents from here on (at the very
* least everything they do needs to be compatible with newer catalog
* contents).
* Add a new Snapshot and invalidation messages to all transactions we're
* decoding that currently are in-progress so they can see new catalog contents
* made by the transaction that just committed. This is necessary because those
* in-progress transactions will use the new catalog's contents from here on
* (at the very least everything they do needs to be compatible with newer
* catalog contents).
*/
static void
SnapBuildDistributeNewCatalogSnapshot(SnapBuild *builder, XLogRecPtr lsn)
SnapBuildDistributeSnapshotAndInval(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid)
{
dlist_iter txn_i;
ReorderBufferTXN *txn;
@ -859,7 +859,8 @@ SnapBuildDistributeNewCatalogSnapshot(SnapBuild *builder, XLogRecPtr lsn)
/*
* Iterate through all toplevel transactions. This can include
* subtransactions which we just don't yet know to be that, but that's
* fine, they will just get an unnecessary snapshot queued.
* fine, they will just get an unnecessary snapshot and invalidations
* queued.
*/
dlist_foreach(txn_i, &builder->reorder->toplevel_by_lsn)
{
@ -872,6 +873,14 @@ SnapBuildDistributeNewCatalogSnapshot(SnapBuild *builder, XLogRecPtr lsn)
* transaction which in turn implies we don't yet need a snapshot at
* all. We'll add a snapshot when the first change gets queued.
*
* Similarly, we don't need to add invalidations to a transaction whose
* base snapshot is not yet set. Once a base snapshot is built, it will
* include the xids of committed transactions that have modified the
* catalog, thus reflecting the new catalog contents. The existing
* catalog cache will have already been invalidated after processing
* the invalidations in the transaction that modified catalogs,
* ensuring that a fresh cache is constructed during decoding.
*
* NB: This works correctly even for subtransactions because
* ReorderBufferAssignChild() takes care to transfer the base snapshot
* to the top-level transaction, and while iterating the changequeue
@ -881,13 +890,13 @@ SnapBuildDistributeNewCatalogSnapshot(SnapBuild *builder, XLogRecPtr lsn)
continue;
/*
* We don't need to add snapshot to prepared transactions as they
* should not see the new catalog contents.
* We don't need to add snapshot or invalidations to prepared
* transactions as they should not see the new catalog contents.
*/
if (rbtxn_prepared(txn) || rbtxn_skip_prepared(txn))
continue;
elog(DEBUG2, "adding a new snapshot to %u at %X/%X",
elog(DEBUG2, "adding a new snapshot and invalidations to %u at %X/%X",
txn->xid, LSN_FORMAT_ARGS(lsn));
/*
@ -897,6 +906,33 @@ SnapBuildDistributeNewCatalogSnapshot(SnapBuild *builder, XLogRecPtr lsn)
SnapBuildSnapIncRefcount(builder->snapshot);
ReorderBufferAddSnapshot(builder->reorder, txn->xid, lsn,
builder->snapshot);
/*
* Add invalidation messages to the reorder buffer of in-progress
* transactions except the current committed transaction, for which we
* will execute invalidations at the end.
*
* It is required, otherwise, we will end up using the stale catcache
* contents built by the current transaction even after its decoding,
* which should have been invalidated due to concurrent catalog
* changing transaction.
*/
if (txn->xid != xid)
{
uint32 ninvalidations;
SharedInvalidationMessage *msgs = NULL;
ninvalidations = ReorderBufferGetInvalidations(builder->reorder,
xid, &msgs);
if (ninvalidations > 0)
{
Assert(msgs != NULL);
ReorderBufferAddInvalidations(builder->reorder, txn->xid, lsn,
ninvalidations, msgs);
}
}
}
}
@ -1175,8 +1211,11 @@ SnapBuildCommitTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid,
/* refcount of the snapshot builder for the new snapshot */
SnapBuildSnapIncRefcount(builder->snapshot);
/* add a new catalog snapshot to all currently running transactions */
SnapBuildDistributeNewCatalogSnapshot(builder, lsn);
/*
* Add a new catalog snapshot and invalidations messages to all
* currently running transactions.
*/
SnapBuildDistributeSnapshotAndInval(builder, lsn, xid);
}
}


@ -676,6 +676,10 @@ TransactionId ReorderBufferGetOldestXmin(ReorderBuffer *rb);
void ReorderBufferSetRestartPoint(ReorderBuffer *, XLogRecPtr ptr);
uint32 ReorderBufferGetInvalidations(ReorderBuffer *rb,
TransactionId xid,
SharedInvalidationMessage **msgs);
void StartupReorderBuffer(void);
#endif