Fix race condition with unprotected use of a latch pointer variable.
Commit 597a87ccc introduced a latch pointer variable to replace use of a
long-lived shared latch in the shared WalRcvData structure.  This was not
well thought out, because there are now hazards of the pointer variable
changing while it's being inspected by another process.  This could
obviously lead to a core dump in code like

	if (WalRcv->latch)
		SetLatch(WalRcv->latch);

and there's a more remote risk of a torn read, if we have any platforms
where reading/writing a pointer is not atomic.

An actual problem would occur only if the walreceiver process exits
(gracefully) while the startup process is trying to signal it, but that
seems well within the realm of possibility.

To fix, treat the pointer variable (not the referenced latch) as being
protected by the WalRcv->mutex spinlock.  There remains a race condition
that we could apply SetLatch to a process latch that no longer belongs to
the walreceiver, but I believe that's harmless: at worst it'd cause an
extra wakeup of the next process to use that PGPROC structure.

Back-patch to v10 where the faulty code was added.

Discussion: https://postgr.es/m/22735.1507048202@sss.pgh.pa.us
commit 45f9d08684
parent 89e434b59c
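The fix follows a general pattern for shared pointers that one process may
clear while another dereferences them: take a snapshot of the pointer while
holding the lock that protects it, then act on the local copy after
releasing the lock. The sketch below contrasts the removed and the fixed
shapes. It is a minimal stand-alone illustration, not PostgreSQL code: a
pthread mutex stands in for the slock_t spinlock, the toy Latch/SetLatch
merely set a flag, and the SharedState type and the wake_owner_* and
owner_detach names are invented for the example.

	#include <pthread.h>
	#include <stdbool.h>
	#include <stddef.h>

	/* Toy latch: the real SetLatch also wakes the latch's owner. */
	typedef struct Latch
	{
		volatile bool is_set;
	} Latch;

	void
	SetLatch(Latch *latch)
	{
		latch->is_set = true;
	}

	/* Analogue of WalRcvData: the mutex protects the pointer, not the latch. */
	typedef struct SharedState
	{
		pthread_mutex_t mutex;
		Latch	   *latch;		/* NULL whenever no owner is attached */
	} SharedState;

	/*
	 * The removed pattern: the owner can set st->latch to NULL between the
	 * test and the dereference, so this can crash (or tear the read on
	 * platforms where pointer loads are not atomic).
	 */
	void
	wake_owner_unsafe(SharedState *st)
	{
		if (st->latch)
			SetLatch(st->latch);
	}

	/* The fixed pattern: snapshot under the lock, use the local copy. */
	void
	wake_owner_safe(SharedState *st)
	{
		Latch	   *latch;

		pthread_mutex_lock(&st->mutex);
		latch = st->latch;		/* cannot change or tear while locked */
		pthread_mutex_unlock(&st->mutex);

		if (latch)
			SetLatch(latch);	/* worst case: a spurious wakeup */
	}

	/* The owner detaches under the same lock, as WalRcvDie now does. */
	void
	owner_detach(SharedState *st)
	{
		pthread_mutex_lock(&st->mutex);
		st->latch = NULL;
		pthread_mutex_unlock(&st->mutex);
	}

	int
	main(void)
	{
		static SharedState st = {PTHREAD_MUTEX_INITIALIZER, NULL};
		static Latch my_latch = {false};

		st.latch = &my_latch;	/* attach; real code does this under the lock */
		wake_owner_safe(&st);	/* sets my_latch.is_set */
		owner_detach(&st);
		wake_owner_safe(&st);	/* now a safe no-op: the snapshot sees NULL */
		return my_latch.is_set ? 0 : 1;
	}

The residual race the commit message describes survives in this shape too:
wake_owner_safe can set a latch its owner has already abandoned, which is
harmless for a flag-style latch, costing at most a spurious wakeup for
whoever owns it next.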
--- a/src/backend/replication/walreceiver.c
+++ b/src/backend/replication/walreceiver.c
@@ -256,13 +256,14 @@ WalReceiverMain(void)
 	walrcv->lastMsgSendTime =
 		walrcv->lastMsgReceiptTime = walrcv->latestWalEndTime = now;
 
+	/* Report the latch to use to awaken this process */
+	walrcv->latch = &MyProc->procLatch;
+
 	SpinLockRelease(&walrcv->mutex);
 
 	/* Arrange to clean up at walreceiver exit */
 	on_shmem_exit(WalRcvDie, 0);
 
-	walrcv->latch = &MyProc->procLatch;
-
 	/* Properly accept or ignore signals the postmaster might send us */
 	pqsignal(SIGHUP, WalRcvSigHupHandler);	/* set flag to read config file */
 	pqsignal(SIGINT, SIG_IGN);
@@ -777,8 +778,7 @@ WalRcvDie(int code, Datum arg)
 	/* Ensure that all WAL records received are flushed to disk */
 	XLogWalRcvFlush(true);
 
-	walrcv->latch = NULL;
 	/* Mark ourselves inactive in shared memory */
 	SpinLockAcquire(&walrcv->mutex);
 	Assert(walrcv->walRcvState == WALRCV_STREAMING ||
 		   walrcv->walRcvState == WALRCV_RESTARTING ||
@@ -789,6 +789,7 @@ WalRcvDie(int code, Datum arg)
 	walrcv->walRcvState = WALRCV_STOPPED;
 	walrcv->pid = 0;
 	walrcv->ready_to_display = false;
+	walrcv->latch = NULL;
 	SpinLockRelease(&walrcv->mutex);
 
 	/* Terminate the connection gracefully. */
@@ -1344,9 +1345,15 @@ ProcessWalSndrMessage(XLogRecPtr walEnd, TimestampTz sendTime)
 void
 WalRcvForceReply(void)
 {
+	Latch	   *latch;
+
 	WalRcv->force_reply = true;
-	if (WalRcv->latch)
-		SetLatch(WalRcv->latch);
+	/* fetching the latch pointer might not be atomic, so use spinlock */
+	SpinLockAcquire(&WalRcv->mutex);
+	latch = WalRcv->latch;
+	SpinLockRelease(&WalRcv->mutex);
+	if (latch)
+		SetLatch(latch);
 }
 
 /*
--- a/src/backend/replication/walreceiverfuncs.c
+++ b/src/backend/replication/walreceiverfuncs.c
@@ -226,6 +226,7 @@ RequestXLogStreaming(TimeLineID tli, XLogRecPtr recptr, const char *conninfo,
 	WalRcvData *walrcv = WalRcv;
 	bool		launch = false;
 	pg_time_t	now = (pg_time_t) time(NULL);
+	Latch	   *latch;
 
 	/*
 	 * We always start at the beginning of the segment. That prevents a broken
@@ -274,12 +275,14 @@ RequestXLogStreaming(TimeLineID tli, XLogRecPtr recptr, const char *conninfo,
 	walrcv->receiveStart = recptr;
 	walrcv->receiveStartTLI = tli;
 
+	latch = walrcv->latch;
+
 	SpinLockRelease(&walrcv->mutex);
 
 	if (launch)
 		SendPostmasterSignal(PMSIGNAL_START_WALRECEIVER);
-	else if (walrcv->latch)
-		SetLatch(walrcv->latch);
+	else if (latch)
+		SetLatch(latch);
 }
 
 /*
--- a/src/include/replication/walreceiver.h
+++ b/src/include/replication/walreceiver.h
@@ -117,14 +117,6 @@ typedef struct
 	/* set true once conninfo is ready to display (obfuscated pwds etc) */
 	bool		ready_to_display;
 
-	slock_t		mutex;			/* locks shared variables shown above */
-
-	/*
-	 * force walreceiver reply? This doesn't need to be locked; memory
-	 * barriers for ordering are sufficient.
-	 */
-	bool		force_reply;
-
 	/*
 	 * Latch used by startup process to wake up walreceiver after telling it
 	 * where to start streaming (after setting receiveStart and
@@ -133,6 +125,15 @@ typedef struct
 	 * normally mapped to procLatch when walreceiver is running.
 	 */
 	Latch	   *latch;
+
+	slock_t		mutex;			/* locks shared variables shown above */
+
+	/*
+	 * force walreceiver reply? This doesn't need to be locked; memory
+	 * barriers for ordering are sufficient. But we do need atomic fetch and
+	 * store semantics, so use sig_atomic_t.
+	 */
+	sig_atomic_t force_reply;	/* used as a bool */
 } WalRcvData;
 
 extern WalRcvData *WalRcv;
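A hedged aside on the type change in the final hunk: force_reply is
deliberately read and written without taking the spinlock, so it must be a
type the platform can load and store indivisibly. sig_atomic_t is the
integer type C guarantees that property for (strictly, with respect to
signal handlers, which is why it is the conventional choice for unlocked
single-word flags). A small self-contained illustration of the idiom; the
function names are invented, not PostgreSQL's, and the volatile qualifier
keeps the toy honest where the shared-memory original relies on memory
barriers instead:

	#include <signal.h>
	#include <stdio.h>

	/* An unlocked single-word flag shared between a requester and a poller. */
	static volatile sig_atomic_t force_reply = 0;

	/* Requester side: one indivisible store, no lock required. */
	void
	request_reply(void)
	{
		force_reply = 1;
	}

	/* Poller side: test, then clear; each access is a whole-word operation. */
	int
	consume_reply_request(void)
	{
		if (force_reply)
		{
			force_reply = 0;
			return 1;
		}
		return 0;
	}

	int
	main(void)
	{
		request_reply();
		printf("pending: %d\n", consume_reply_request());	/* pending: 1 */
		printf("pending: %d\n", consume_reply_request());	/* pending: 0 */
		return 0;
	}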