1
0
mirror of https://github.com/postgres/postgres.git synced 2025-12-07 12:02:30 +03:00

Set next multixid's offset when creating a new multixid

With this commit, the next multixid's offset will always be set on the
offsets page, by the time that a backend might try to read it, so we
no longer need the waiting mechanism with the condition variable. In
other words, this eliminates "corner case 2" mentioned in the
comments.

The waiting mechanism was broken in a few scenarios:

- When nextMulti was advanced without WAL-logging the next
  multixid. For example, if a later multixid was already assigned and
  WAL-logged before the previous one was WAL-logged, and then the
  server crashed. In that case the next offset would never be set in
  the offsets SLRU, and a query trying to read it would get stuck
  waiting for it. Same thing could happen if pg_resetwal was used to
  forcibly advance nextMulti.

- In hot standby mode, a deadlock could happen where one backend waits
  for the next multixid assignment record, but WAL replay is not
  advancing because of a recovery conflict with the waiting backend.

The old TAP test used carefully placed injection points to exercise
the old waiting code, but now that the waiting code is gone, much of
the old test is no longer relevant. Rewrite the test to reproduce the
IPC/MultixactCreation hang after crash recovery instead, and to verify
that previously recorded multixids stay readable.

Backpatch to all supported versions. In back-branches, we still need
to be able to read WAL that was generated before this fix, so in the
back-branches this includes a hack to initialize the next offsets page
when replaying XLOG_MULTIXACT_CREATE_ID for the last multixid on a
page. On 'master', bump XLOG_PAGE_MAGIC instead to indicate that the
WAL is not compatible.

Author: Andrey Borodin <amborodin@acm.org>
Reviewed-by: Dmitry Yurichev <dsy.075@yandex.ru>
Reviewed-by: Álvaro Herrera <alvherre@kurilemu.de>
Reviewed-by: Kirill Reshke <reshkekirill@gmail.com>
Reviewed-by: Ivan Bykov <i.bykov@modernsys.ru>
Reviewed-by: Chao Li <li.evan.chao@gmail.com>
Discussion: https://www.postgresql.org/message-id/172e5723-d65f-4eec-b512-14beacb326ce@yandex.ru
Backpatch-through: 14
This commit is contained in:
Heikki Linnakangas
2025-12-03 19:15:08 +02:00
parent 9b05e2ec08
commit 789d65364c
4 changed files with 126 additions and 179 deletions

View File

@@ -79,7 +79,6 @@
#include "pg_trace.h" #include "pg_trace.h"
#include "pgstat.h" #include "pgstat.h"
#include "postmaster/autovacuum.h" #include "postmaster/autovacuum.h"
#include "storage/condition_variable.h"
#include "storage/pmsignal.h" #include "storage/pmsignal.h"
#include "storage/proc.h" #include "storage/proc.h"
#include "storage/procarray.h" #include "storage/procarray.h"
@@ -271,12 +270,6 @@ typedef struct MultiXactStateData
/* support for members anti-wraparound measures */ /* support for members anti-wraparound measures */
MultiXactOffset offsetStopLimit; /* known if oldestOffsetKnown */ MultiXactOffset offsetStopLimit; /* known if oldestOffsetKnown */
/*
* This is used to sleep until a multixact offset is written when we want
* to create the next one.
*/
ConditionVariable nextoff_cv;
/* /*
* Per-backend data starts here. We have two arrays stored in the area * Per-backend data starts here. We have two arrays stored in the area
* immediately following the MultiXactStateData struct. Each is indexed by * immediately following the MultiXactStateData struct. Each is indexed by
@@ -912,13 +905,33 @@ RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset,
int entryno; int entryno;
int slotno; int slotno;
MultiXactOffset *offptr; MultiXactOffset *offptr;
int i; MultiXactId next;
int64 next_pageno;
int next_entryno;
MultiXactOffset *next_offptr;
LWLock *lock; LWLock *lock;
LWLock *prevlock = NULL; LWLock *prevlock = NULL;
/* position of this multixid in the offsets SLRU area */
pageno = MultiXactIdToOffsetPage(multi); pageno = MultiXactIdToOffsetPage(multi);
entryno = MultiXactIdToOffsetEntry(multi); entryno = MultiXactIdToOffsetEntry(multi);
/* position of the next multixid */
next = multi + 1;
if (next < FirstMultiXactId)
next = FirstMultiXactId;
next_pageno = MultiXactIdToOffsetPage(next);
next_entryno = MultiXactIdToOffsetEntry(next);
/*
* Set the starting offset of this multixid's members.
*
 * In the common case, it was already set by the previous
* RecordNewMultiXact call, as this was the next multixid of the previous
* multixid. But if multiple backends are generating multixids
* concurrently, we might race ahead and get called before the previous
* multixid.
*/
lock = SimpleLruGetBankLock(MultiXactOffsetCtl, pageno); lock = SimpleLruGetBankLock(MultiXactOffsetCtl, pageno);
LWLockAcquire(lock, LW_EXCLUSIVE); LWLockAcquire(lock, LW_EXCLUSIVE);
@@ -933,22 +946,50 @@ RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset,
offptr = (MultiXactOffset *) MultiXactOffsetCtl->shared->page_buffer[slotno]; offptr = (MultiXactOffset *) MultiXactOffsetCtl->shared->page_buffer[slotno];
offptr += entryno; offptr += entryno;
*offptr = offset; if (*offptr != offset)
{
/* should already be set to the correct value, or not at all */
Assert(*offptr == 0);
*offptr = offset;
MultiXactOffsetCtl->shared->page_dirty[slotno] = true;
}
MultiXactOffsetCtl->shared->page_dirty[slotno] = true; /*
* Set the next multixid's offset to the end of this multixid's members.
*/
if (next_pageno == pageno)
{
next_offptr = offptr + 1;
}
else
{
/* must be the first entry on the page */
Assert(next_entryno == 0 || next == FirstMultiXactId);
/* Swap the lock for a lock on the next page */
LWLockRelease(lock);
lock = SimpleLruGetBankLock(MultiXactOffsetCtl, next_pageno);
LWLockAcquire(lock, LW_EXCLUSIVE);
slotno = SimpleLruReadPage(MultiXactOffsetCtl, next_pageno, true, next);
next_offptr = (MultiXactOffset *) MultiXactOffsetCtl->shared->page_buffer[slotno];
next_offptr += next_entryno;
}
if (*next_offptr != offset + nmembers)
{
/* should already be set to the correct value, or not at all */
Assert(*next_offptr == 0);
*next_offptr = offset + nmembers;
MultiXactOffsetCtl->shared->page_dirty[slotno] = true;
}
/* Release MultiXactOffset SLRU lock. */ /* Release MultiXactOffset SLRU lock. */
LWLockRelease(lock); LWLockRelease(lock);
/*
* If anybody was waiting to know the offset of this multixact ID we just
* wrote, they can read it now, so wake them up.
*/
ConditionVariableBroadcast(&MultiXactState->nextoff_cv);
prev_pageno = -1; prev_pageno = -1;
for (i = 0; i < nmembers; i++, offset++) for (int i = 0; i < nmembers; i++, offset++)
{ {
TransactionId *memberptr; TransactionId *memberptr;
uint32 *flagsptr; uint32 *flagsptr;
@@ -1138,8 +1179,11 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
result = FirstMultiXactId; result = FirstMultiXactId;
} }
/* Make sure there is room for the MXID in the file. */ /*
ExtendMultiXactOffset(result); * Make sure there is room for the next MXID in the file. Assigning this
* MXID sets the next MXID's offset already.
*/
ExtendMultiXactOffset(result + 1);
/* /*
* Reserve the members space, similarly to above. Also, be careful not to * Reserve the members space, similarly to above. Also, be careful not to
@@ -1300,11 +1344,8 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
int truelength; int truelength;
MultiXactId oldestMXact; MultiXactId oldestMXact;
MultiXactId nextMXact; MultiXactId nextMXact;
MultiXactId tmpMXact;
MultiXactOffset nextOffset;
MultiXactMember *ptr; MultiXactMember *ptr;
LWLock *lock; LWLock *lock;
bool slept = false;
debug_elog3(DEBUG2, "GetMembers: asked for %u", multi); debug_elog3(DEBUG2, "GetMembers: asked for %u", multi);
@@ -1351,14 +1392,12 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
* error. * error.
* *
* Shared lock is enough here since we aren't modifying any global state. * Shared lock is enough here since we aren't modifying any global state.
* Acquire it just long enough to grab the current counter values. We may * Acquire it just long enough to grab the current counter values.
* need both nextMXact and nextOffset; see below.
*/ */
LWLockAcquire(MultiXactGenLock, LW_SHARED); LWLockAcquire(MultiXactGenLock, LW_SHARED);
oldestMXact = MultiXactState->oldestMultiXactId; oldestMXact = MultiXactState->oldestMultiXactId;
nextMXact = MultiXactState->nextMXact; nextMXact = MultiXactState->nextMXact;
nextOffset = MultiXactState->nextOffset;
LWLockRelease(MultiXactGenLock); LWLockRelease(MultiXactGenLock);
@@ -1378,38 +1417,17 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
* Find out the offset at which we need to start reading MultiXactMembers * Find out the offset at which we need to start reading MultiXactMembers
* and the number of members in the multixact. We determine the latter as * and the number of members in the multixact. We determine the latter as
* the difference between this multixact's starting offset and the next * the difference between this multixact's starting offset and the next
* one's. However, there are some corner cases to worry about: * one's. However, there is one corner case to worry about:
* *
* 1. This multixact may be the latest one created, in which case there is * Because GetNewMultiXactId skips over offset zero, to reserve zero for
* no next one to look at. In this case the nextOffset value we just * to mean "unset", there is an ambiguity near the point of offset
* saved is the correct endpoint.
*
* 2. The next multixact may still be in process of being filled in: that
* is, another process may have done GetNewMultiXactId but not yet written
* the offset entry for that ID. In that scenario, it is guaranteed that
* the offset entry for that multixact exists (because GetNewMultiXactId
* won't release MultiXactGenLock until it does) but contains zero
* (because we are careful to pre-zero offset pages). Because
* GetNewMultiXactId will never return zero as the starting offset for a
* multixact, when we read zero as the next multixact's offset, we know we
* have this case. We handle this by sleeping on the condition variable
* we have just for this; the process in charge will signal the CV as soon
* as it has finished writing the multixact offset.
*
* 3. Because GetNewMultiXactId increments offset zero to offset one to
* handle case #2, there is an ambiguity near the point of offset
* wraparound. If we see next multixact's offset is one, is that our * wraparound. If we see next multixact's offset is one, is that our
* multixact's actual endpoint, or did it end at zero with a subsequent * multixact's actual endpoint, or did it end at zero with a subsequent
* increment? We handle this using the knowledge that if the zero'th * increment? We handle this using the knowledge that if the zero'th
* member slot wasn't filled, it'll contain zero, and zero isn't a valid * member slot wasn't filled, it'll contain zero, and zero isn't a valid
* transaction ID so it can't be a multixact member. Therefore, if we * transaction ID so it can't be a multixact member. Therefore, if we
* read a zero from the members array, just ignore it. * read a zero from the members array, just ignore it.
*
* This is all pretty messy, but the mess occurs only in infrequent corner
* cases, so it seems better than holding the MultiXactGenLock for a long
* time on every multixact creation.
*/ */
retry:
pageno = MultiXactIdToOffsetPage(multi); pageno = MultiXactIdToOffsetPage(multi);
entryno = MultiXactIdToOffsetEntry(multi); entryno = MultiXactIdToOffsetEntry(multi);
@@ -1417,6 +1435,7 @@ retry:
lock = SimpleLruGetBankLock(MultiXactOffsetCtl, pageno); lock = SimpleLruGetBankLock(MultiXactOffsetCtl, pageno);
LWLockAcquire(lock, LW_EXCLUSIVE); LWLockAcquire(lock, LW_EXCLUSIVE);
/* read this multi's offset */
slotno = SimpleLruReadPage(MultiXactOffsetCtl, pageno, true, multi); slotno = SimpleLruReadPage(MultiXactOffsetCtl, pageno, true, multi);
offptr = (MultiXactOffset *) MultiXactOffsetCtl->shared->page_buffer[slotno]; offptr = (MultiXactOffset *) MultiXactOffsetCtl->shared->page_buffer[slotno];
offptr += entryno; offptr += entryno;
@@ -1424,22 +1443,13 @@ retry:
Assert(offset != 0); Assert(offset != 0);
/* /* read next multi's offset */
* Use the same increment rule as GetNewMultiXactId(), that is, don't
* handle wraparound explicitly until needed.
*/
tmpMXact = multi + 1;
if (nextMXact == tmpMXact)
{
/* Corner case 1: there is no next multixact */
length = nextOffset - offset;
}
else
{ {
MultiXactId tmpMXact;
MultiXactOffset nextMXOffset; MultiXactOffset nextMXOffset;
/* handle wraparound if needed */ /* handle wraparound if needed */
tmpMXact = multi + 1;
if (tmpMXact < FirstMultiXactId) if (tmpMXact < FirstMultiXactId)
tmpMXact = FirstMultiXactId; tmpMXact = FirstMultiXactId;
@@ -1472,18 +1482,10 @@ retry:
nextMXOffset = *offptr; nextMXOffset = *offptr;
if (nextMXOffset == 0) if (nextMXOffset == 0)
{ ereport(ERROR,
/* Corner case 2: next multixact is still being filled in */ (errcode(ERRCODE_DATA_CORRUPTED),
LWLockRelease(lock); errmsg("MultiXact %u has invalid next offset",
CHECK_FOR_INTERRUPTS(); multi)));
INJECTION_POINT("multixact-get-members-cv-sleep", NULL);
ConditionVariableSleep(&MultiXactState->nextoff_cv,
WAIT_EVENT_MULTIXACT_CREATION);
slept = true;
goto retry;
}
length = nextMXOffset - offset; length = nextMXOffset - offset;
} }
@@ -1491,12 +1493,7 @@ retry:
LWLockRelease(lock); LWLockRelease(lock);
lock = NULL; lock = NULL;
/* /* read the members */
* If we slept above, clean up state; it's no longer needed.
*/
if (slept)
ConditionVariableCancelSleep();
ptr = (MultiXactMember *) palloc(length * sizeof(MultiXactMember)); ptr = (MultiXactMember *) palloc(length * sizeof(MultiXactMember));
truelength = 0; truelength = 0;
@@ -1539,7 +1536,7 @@ retry:
if (!TransactionIdIsValid(*xactptr)) if (!TransactionIdIsValid(*xactptr))
{ {
/* Corner case 3: we must be looking at unused slot zero */ /* Corner case: we must be looking at unused slot zero */
Assert(offset == 0); Assert(offset == 0);
continue; continue;
} }
@@ -1986,7 +1983,6 @@ MultiXactShmemInit(void)
/* Make sure we zero out the per-backend state */ /* Make sure we zero out the per-backend state */
MemSet(MultiXactState, 0, SHARED_MULTIXACT_STATE_SIZE); MemSet(MultiXactState, 0, SHARED_MULTIXACT_STATE_SIZE);
ConditionVariableInit(&MultiXactState->nextoff_cv);
} }
else else
Assert(found); Assert(found);
@@ -2132,26 +2128,34 @@ TrimMultiXact(void)
pageno); pageno);
/* /*
* Zero out the remainder of the current offsets page. See notes in * Set the offset of nextMXact on the offsets page. This is normally done
* TrimCLOG() for background. Unlike CLOG, some WAL record covers every * in RecordNewMultiXact() of the previous multixact, but let's be sure
* pg_multixact SLRU mutation. Since, also unlike CLOG, we ignore the WAL * the next page exists, if the nextMXact was reset with pg_resetwal for
* rule "write xlog before data," nextMXact successors may carry obsolete, * example.
* nonzero offset values. Zero those so case 2 of GetMultiXactIdMembers() *
* operates normally. * Zero out the remainder of the page. See notes in TrimCLOG() for
* background. Unlike CLOG, some WAL record covers every pg_multixact
* SLRU mutation. Since, also unlike CLOG, we ignore the WAL rule "write
* xlog before data," nextMXact successors may carry obsolete, nonzero
* offset values.
*/ */
entryno = MultiXactIdToOffsetEntry(nextMXact); entryno = MultiXactIdToOffsetEntry(nextMXact);
if (entryno != 0)
{ {
int slotno; int slotno;
MultiXactOffset *offptr; MultiXactOffset *offptr;
LWLock *lock = SimpleLruGetBankLock(MultiXactOffsetCtl, pageno); LWLock *lock = SimpleLruGetBankLock(MultiXactOffsetCtl, pageno);
LWLockAcquire(lock, LW_EXCLUSIVE); LWLockAcquire(lock, LW_EXCLUSIVE);
slotno = SimpleLruReadPage(MultiXactOffsetCtl, pageno, true, nextMXact); if (entryno == 0)
slotno = SimpleLruZeroPage(MultiXactOffsetCtl, pageno);
else
slotno = SimpleLruReadPage(MultiXactOffsetCtl, pageno, true, nextMXact);
offptr = (MultiXactOffset *) MultiXactOffsetCtl->shared->page_buffer[slotno]; offptr = (MultiXactOffset *) MultiXactOffsetCtl->shared->page_buffer[slotno];
offptr += entryno; offptr += entryno;
MemSet(offptr, 0, BLCKSZ - (entryno * sizeof(MultiXactOffset))); *offptr = offset;
if (entryno != 0 && (entryno + 1) * sizeof(MultiXactOffset) != BLCKSZ)
MemSet(offptr + 1, 0, BLCKSZ - (entryno + 1) * sizeof(MultiXactOffset));
MultiXactOffsetCtl->shared->page_dirty[slotno] = true; MultiXactOffsetCtl->shared->page_dirty[slotno] = true;
LWLockRelease(lock); LWLockRelease(lock);

View File

@@ -31,7 +31,7 @@
/* /*
* Each page of XLOG file has a header like this: * Each page of XLOG file has a header like this:
*/ */
#define XLOG_PAGE_MAGIC 0xD119 /* can be used as WAL version indicator */ #define XLOG_PAGE_MAGIC 0xD11A /* can be used as WAL version indicator */
typedef struct XLogPageHeaderData typedef struct XLogPageHeaderData
{ {

View File

@@ -1,10 +1,6 @@
# Copyright (c) 2024-2025, PostgreSQL Global Development Group # Copyright (c) 2024-2025, PostgreSQL Global Development Group
# This test verifies edge case of reading a multixact: # Test multixid corner cases.
# when we have multixact that is followed by exactly one another multixact,
# and another multixact have no offset yet, we must wait until this offset
# becomes observable. Previously we used to wait for 1ms in a loop in this
# case, but now we use CV for this. This test is exercising such a sleep.
use strict; use strict;
use warnings FATAL => 'all'; use warnings FATAL => 'all';
@@ -19,9 +15,7 @@ if ($ENV{enable_injection_points} ne 'yes')
plan skip_all => 'Injection points not supported by this build'; plan skip_all => 'Injection points not supported by this build';
} }
my ($node, $result); my $node = PostgreSQL::Test::Cluster->new('main');
$node = PostgreSQL::Test::Cluster->new('mike');
$node->init; $node->init;
$node->append_conf('postgresql.conf', $node->append_conf('postgresql.conf',
"shared_preload_libraries = 'test_slru,injection_points'"); "shared_preload_libraries = 'test_slru,injection_points'");
@@ -29,95 +23,47 @@ $node->start;
$node->safe_psql('postgres', q(CREATE EXTENSION injection_points)); $node->safe_psql('postgres', q(CREATE EXTENSION injection_points));
$node->safe_psql('postgres', q(CREATE EXTENSION test_slru)); $node->safe_psql('postgres', q(CREATE EXTENSION test_slru));
# Test for Multixact generation edge case # This test creates three multixacts. The middle one is never
$node->safe_psql('postgres', # WAL-logged or recorded on the offsets page, because we pause the
q{select injection_points_attach('test-multixact-read','wait')}); # backend and crash the server before that. After restart, verify that
$node->safe_psql('postgres', # the other multixacts are readable, despite the middle one being
q{select injection_points_attach('multixact-get-members-cv-sleep','wait')} # lost.
);
# This session must observe sleep on the condition variable while generating a # Create the first multixact
# multixact. To achieve this it first will create a multixact, then pause my $bg_psql = $node->background_psql('postgres');
# before reading it. my $multi1 = $bg_psql->query_safe(q(SELECT test_create_multixact();));
my $observer = $node->background_psql('postgres');
# This query will create a multixact, and hang just before reading it. # Assign the middle multixact. Use an injection point to prevent it
$observer->query_until( # from being fully recorded.
qr/start/,
q{
\echo start
SELECT test_read_multixact(test_create_multixact());
});
$node->wait_for_event('client backend', 'test-multixact-read');
# This session will create the next Multixact. This is necessary to avoid
# multixact.c's non-sleeping edge case 1.
my $creator = $node->background_psql('postgres');
$node->safe_psql('postgres', $node->safe_psql('postgres',
q{SELECT injection_points_attach('multixact-create-from-members','wait');} q{SELECT injection_points_attach('multixact-create-from-members','wait');}
); );
# We expect this query to hang in the critical section after generating new $bg_psql->query_until(
# multixact, but before filling its offset into SLRU. qr/assigning lost multi/, q(
# Running an injection point inside a critical section requires it to be \echo assigning lost multi
# loaded beforehand.
$creator->query_until(
qr/start/, q{
\echo start
SELECT test_create_multixact(); SELECT test_create_multixact();
}); ));
$node->wait_for_event('client backend', 'multixact-create-from-members'); $node->wait_for_event('client backend', 'multixact-create-from-members');
# Ensure we have the backends waiting that we expect
is( $node->safe_psql(
'postgres',
q{SELECT string_agg(wait_event, ', ' ORDER BY wait_event)
FROM pg_stat_activity WHERE wait_event_type = 'InjectionPoint'}
),
'multixact-create-from-members, test-multixact-read',
"matching injection point waits");
# Now wake observer to get it to read the initial multixact. A subsequent
# multixact already exists, but that one doesn't have an offset assigned, so
# this will hit multixact.c's edge case 2.
$node->safe_psql('postgres',
q{SELECT injection_points_wakeup('test-multixact-read')});
$node->wait_for_event('client backend', 'multixact-get-members-cv-sleep');
# Ensure we have the backends waiting that we expect
is( $node->safe_psql(
'postgres',
q{SELECT string_agg(wait_event, ', ' ORDER BY wait_event)
FROM pg_stat_activity WHERE wait_event_type = 'InjectionPoint'}
),
'multixact-create-from-members, multixact-get-members-cv-sleep',
"matching injection point waits");
# Now we have two backends waiting in multixact-create-from-members and
# multixact-get-members-cv-sleep. Also we have 3 injections points set to wait.
# If we wakeup multixact-get-members-cv-sleep it will happen again, so we must
# detach it first. So let's detach all injection points, then wake up all
# backends.
$node->safe_psql('postgres',
q{SELECT injection_points_detach('test-multixact-read')});
$node->safe_psql('postgres', $node->safe_psql('postgres',
q{SELECT injection_points_detach('multixact-create-from-members')}); q{SELECT injection_points_detach('multixact-create-from-members')});
$node->safe_psql('postgres',
q{SELECT injection_points_detach('multixact-get-members-cv-sleep')});
$node->safe_psql('postgres', # Create the third multixid
q{SELECT injection_points_wakeup('multixact-create-from-members')}); my $multi2 = $node->safe_psql('postgres', q{SELECT test_create_multixact();});
$node->safe_psql('postgres',
q{SELECT injection_points_wakeup('multixact-get-members-cv-sleep')});
# Background psql will now be able to read the result and disconnect. # All set and done, it's time for hard restart
$observer->quit; $node->stop('immediate');
$creator->quit; $node->start;
$bg_psql->{run}->finish;
$node->stop; # Verify that the recorded multixids are readable
is( $node->safe_psql('postgres', qq{SELECT test_read_multixact('$multi1');}),
'',
'first recorded multi is readable');
is( $node->safe_psql('postgres', qq{SELECT test_read_multixact('$multi2');}),
'',
'second recorded multi is readable');
# If we reached this point - everything is OK.
ok(1);
done_testing(); done_testing();

View File

@@ -17,7 +17,6 @@
#include "access/multixact.h" #include "access/multixact.h"
#include "access/xact.h" #include "access/xact.h"
#include "fmgr.h" #include "fmgr.h"
#include "utils/injection_point.h"
PG_FUNCTION_INFO_V1(test_create_multixact); PG_FUNCTION_INFO_V1(test_create_multixact);
PG_FUNCTION_INFO_V1(test_read_multixact); PG_FUNCTION_INFO_V1(test_read_multixact);
@@ -37,8 +36,7 @@ test_create_multixact(PG_FUNCTION_ARGS)
} }
/* /*
* Reads given multixact after running an injection point. Discards local cache * Reads given multixact. Discards local cache to make a real read.
* to make a real read. Tailored for multixact testing.
*/ */
Datum Datum
test_read_multixact(PG_FUNCTION_ARGS) test_read_multixact(PG_FUNCTION_ARGS)
@@ -46,7 +44,6 @@ test_read_multixact(PG_FUNCTION_ARGS)
MultiXactId id = PG_GETARG_TRANSACTIONID(0); MultiXactId id = PG_GETARG_TRANSACTIONID(0);
MultiXactMember *members; MultiXactMember *members;
INJECTION_POINT("test-multixact-read", NULL);
/* discard caches */ /* discard caches */
AtEOXact_MultiXact(); AtEOXact_MultiXact();