Cleanup slots during drop database
Automatically drop all logical replication slots associated with a database when the database is dropped. Previously we threw an ERROR if any slot existed; now we throw an ERROR only if a slot is active in the database being dropped.

Craig Ringer
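The hunk below covers only the slot.c side of the change. For orientation, here is a rough sketch of the matching caller side in dropdb() (src/backend/commands/dbcommands.c): it is paraphrased from the commit's intent, not quoted from this diff, and the error wording is illustrative. The ReplicationSlotsCountDBSlots() call is taken from the signature visible in the hunk header.

    /*
     * Sketch of the caller side in dropdb() -- paraphrased, not this hunk.
     * With the pg_database row exclusively locked, no new slots can appear.
     */
    int		nslots,
    		nslots_active;

    /* Only *active* slots still block the drop. */
    if (ReplicationSlotsCountDBSlots(db_id, &nslots, &nslots_active) &&
    	nslots_active > 0)
    	ereport(ERROR,
    			(errcode(ERRCODE_OBJECT_IN_USE),
    			 errmsg("database \"%s\" is used by an active logical replication slot",
    					dbname)));

    /* ... remaining in-use checks and catalog changes ... */

    /* Inactive slots are now dropped here instead of raising an ERROR. */
    ReplicationSlotsDropDBSlots(db_id);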
@@ -796,6 +796,94 @@ ReplicationSlotsCountDBSlots(Oid dboid, int *nslots, int *nactive)
 	return false;
 }
 
+/*
+ * ReplicationSlotsDropDBSlots -- Drop all db-specific slots relating to the
+ * passed database oid. The caller should hold an exclusive lock on the
+ * pg_database oid for the database to prevent creation of new slots on the db
+ * or replay from existing slots.
+ *
+ * This routine isn't as efficient as it could be - but we don't drop databases
+ * often, especially databases with lots of slots.
+ *
+ * Another session that concurrently acquires an existing slot on the target DB
+ * (most likely to drop it) may cause this function to ERROR. If that happens
+ * it may have dropped some but not all slots.
+ */
+void
+ReplicationSlotsDropDBSlots(Oid dboid)
+{
+	int			i;
+
+	if (max_replication_slots <= 0)
+		return;
+
+restart:
+	LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
+	for (i = 0; i < max_replication_slots; i++)
+	{
+		ReplicationSlot *s;
+		NameData	slotname;
+		int			active_pid;
+
+		s = &ReplicationSlotCtl->replication_slots[i];
+
+		/* cannot change while ReplicationSlotControlLock is held */
+		if (!s->in_use)
+			continue;
+
+		/* only logical slots are database specific, skip */
+		if (!SlotIsLogical(s))
+			continue;
+
+		/* not our database, skip */
+		if (s->data.database != dboid)
+			continue;
+
+		/* Claim the slot, as if ReplicationSlotAcquire()ing. */
+		SpinLockAcquire(&s->mutex);
+		strncpy(NameStr(slotname), NameStr(s->data.name), NAMEDATALEN);
+		NameStr(slotname)[NAMEDATALEN - 1] = '\0';
+		active_pid = s->active_pid;
+		if (active_pid == 0)
+		{
+			MyReplicationSlot = s;
+			s->active_pid = MyProcPid;
+		}
+		SpinLockRelease(&s->mutex);
+
+		/*
+		 * We might fail here if the slot was active. Even though we hold an
+		 * exclusive lock on the database object, a logical slot for that DB
+		 * can still be active if it's being dropped by a backend connected
+		 * to another DB or is otherwise acquired.
+		 *
+		 * It's an unlikely race that'll only arise from concurrent user
+		 * action, so we'll just bail out.
+		 */
+		if (active_pid)
+			elog(ERROR, "replication slot %s is in use by pid %d",
+				 NameStr(slotname), active_pid);
+
+		/*
+		 * To avoid largely duplicating ReplicationSlotDropAcquired() or
+		 * complicating it with already_locked flags for ProcArrayLock,
+		 * ReplicationSlotControlLock and ReplicationSlotAllocationLock, we
+		 * just release our ReplicationSlotControlLock to drop the slot.
+		 *
+		 * For safety we'll restart our scan from the beginning each time we
+		 * release the lock.
+		 */
+		LWLockRelease(ReplicationSlotControlLock);
+		ReplicationSlotDropAcquired();
+		goto restart;
+	}
+	LWLockRelease(ReplicationSlotControlLock);
+
+	/* recompute limits once after all slots are dropped */
+	ReplicationSlotsComputeRequiredXmin(false);
+	ReplicationSlotsComputeRequiredLSN();
+}
+
 
 /*
  * Check whether the server's configuration supports using replication
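The claim-then-restart pattern above is easy to miss in the diff. The standalone program below mimics it outside PostgreSQL: every name in it is invented for illustration, and a pthread mutex stands in for the per-slot spinlock. It claims an entry under the lock, fails if another owner holds it, and rescans from the top after every drop, since the array cannot be trusted once the shared lock is released.

    /*
     * Standalone illustration of the claim-and-restart pattern -- NOT
     * PostgreSQL code. DemoSlot, drop_db_slots() etc. are invented names.
     * Build with: cc -pthread demo.c
     */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define NSLOTS 4

    typedef struct
    {
    	pthread_mutex_t mutex;		/* per-slot lock, like the spinlock */
    	int			in_use;
    	int			owner_pid;		/* 0 = unclaimed, like active_pid */
    	int			dboid;
    } DemoSlot;

    static DemoSlot slots[NSLOTS];

    static void
    drop_db_slots(int dboid, int my_pid)
    {
    restart:
    	for (int i = 0; i < NSLOTS; i++)
    	{
    		DemoSlot   *s = &slots[i];
    		int			owner;

    		if (!s->in_use || s->dboid != dboid)
    			continue;

    		/* Claim the slot, as if acquiring it. */
    		pthread_mutex_lock(&s->mutex);
    		owner = s->owner_pid;
    		if (owner == 0)
    			s->owner_pid = my_pid;
    		pthread_mutex_unlock(&s->mutex);

    		/* Someone else holds it: the real code elog(ERROR)s here. */
    		if (owner != 0)
    		{
    			fprintf(stderr, "slot %d is in use by pid %d\n", i, owner);
    			exit(EXIT_FAILURE);
    		}

    		/* "Drop" the claimed slot, then rescan from the beginning. */
    		s->in_use = 0;
    		s->owner_pid = 0;
    		goto restart;
    	}
    }

    int
    main(void)
    {
    	for (int i = 0; i < NSLOTS; i++)
    	{
    		pthread_mutex_init(&slots[i].mutex, NULL);
    		slots[i].in_use = 1;
    		slots[i].dboid = (i == 2) ? 2 : 1;	/* slot 2: another db */
    	}

    	drop_db_slots(1, 42);		/* drop every slot for db 1 */

    	for (int i = 0; i < NSLOTS; i++)
    		printf("slot %d: in_use=%d\n", i, slots[i].in_use);
    	return 0;
    }

Restarting the scan after each drop is the simple-but-safe choice the in-tree comment describes: once ReplicationSlotControlLock has been released to perform the drop, indexes already visited can no longer be trusted, and re-scanning is cheap because databases are dropped rarely.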