
Revert "Display the time when the process started waiting for the lock, in pg_locks."

This reverts commit 3b733fcd04.

Per buildfarm members prion and rorqual.
Commit: 890d2182a2
Parent: 3b733fcd04
Author: Fujii Masao
Date:   2021-02-09 18:30:40 +09:00
11 changed files with 10 additions and 85 deletions

src/backend/storage/ipc/standby.c

@@ -540,34 +540,12 @@ void
 ResolveRecoveryConflictWithLock(LOCKTAG locktag, bool logging_conflict)
 {
 	TimestampTz ltime;
-	TimestampTz now;
 
 	Assert(InHotStandby);
 
 	ltime = GetStandbyLimitTime();
-	now = GetCurrentTimestamp();
 
-	/*
-	 * Update waitStart if first time through after the startup process
-	 * started waiting for the lock. It should not be updated every time
-	 * ResolveRecoveryConflictWithLock() is called during the wait.
-	 *
-	 * Use the current time obtained for comparison with ltime as waitStart
-	 * (i.e., the time when this process started waiting for the lock). Since
-	 * getting the current time newly can cause overhead, we reuse the
-	 * already-obtained time to avoid that overhead.
-	 *
-	 * Note that waitStart is updated without holding the lock table's
-	 * partition lock, to avoid the overhead by additional lock acquisition.
-	 * This can cause "waitstart" in pg_locks to become NULL for a very short
-	 * period of time after the wait started even though "granted" is false.
-	 * This is OK in practice because we can assume that users are likely to
-	 * look at "waitstart" when waiting for the lock for a long time.
-	 */
-	if (pg_atomic_read_u64(&MyProc->waitStart) == 0)
-		pg_atomic_write_u64(&MyProc->waitStart, now);
-
-	if (now >= ltime && ltime != 0)
+	if (GetCurrentTimestamp() >= ltime && ltime != 0)
 	{
 		/*
 		 * We're already behind, so clear a path as quickly as possible.
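
Note: the removed comment documents a deliberate trade-off: waitStart is published without holding the lock table's partition lock, so a reader can observe 0 for a short window even though "granted" is already false. A minimal standalone sketch of that publication pattern, using C11 atomics rather than PostgreSQL's pg_atomic API (all names here are illustrative, not from the tree):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* 0 means "wait start not published yet"; pg_locks would show NULL. */
static _Atomic uint64_t wait_start;

static uint64_t now_usec(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	return (uint64_t) ts.tv_sec * 1000000 + (uint64_t) ts.tv_nsec / 1000;
}

/*
 * Waiter side: publish the start time only on the first call, reusing a
 * timestamp already obtained for the conflict check (no second clock
 * read, no partition lock).
 */
static void publish_wait_start(uint64_t already_obtained_now)
{
	if (atomic_load(&wait_start) == 0)
		atomic_store(&wait_start, already_obtained_now);
}

/* Monitoring side: a 0 read is reported as NULL. */
static void report_wait_start(void)
{
	uint64_t ws = atomic_load(&wait_start);

	if (ws == 0)
		printf("waitstart: NULL\n");
	else
		printf("waitstart: %llu us\n", (unsigned long long) ws);
}

int main(void)
{
	report_wait_start();			/* before publication: NULL */
	publish_wait_start(now_usec());
	report_wait_start();			/* after publication: visible */
	return 0;
}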

src/backend/storage/lmgr/lock.c

@@ -3619,12 +3619,6 @@ GetLockStatusData(void)
 			instance->leaderPid = proc->pid;
 			instance->fastpath = true;
 
-			/*
-			 * Successfully taking fast path lock means there were no
-			 * conflicting locks.
-			 */
-			instance->waitStart = 0;
-
 			el++;
 		}
@@ -3652,7 +3646,6 @@ GetLockStatusData(void)
 			instance->pid = proc->pid;
 			instance->leaderPid = proc->pid;
 			instance->fastpath = true;
-			instance->waitStart = 0;
 
 			el++;
 		}
@@ -3705,7 +3698,6 @@ GetLockStatusData(void)
 		instance->pid = proc->pid;
 		instance->leaderPid = proclock->groupLeader->pid;
 		instance->fastpath = false;
-		instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
 
 		el++;
 	}
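
Note: the assignments removed here kept a simple convention: a fast-path lock is only taken when nothing conflicts, so its waitStart stays 0, and per the removed comments a zero value surfaces as a NULL "waitstart" in pg_locks. A hedged plain-C stand-in for that mapping (not the actual lockfuncs.c Datum-building code):

#include <stdint.h>
#include <stdio.h>

/* PostgreSQL's TimestampTz is an int64 microsecond count. */
typedef int64_t TimestampTz;

/* Illustrative stand-in for displaying one pg_locks row's wait fields. */
static void print_waitstart(int fastpath, TimestampTz waitStart)
{
	if (waitStart == 0)
		printf("fastpath=%d waitstart=NULL\n", fastpath);	/* never waited */
	else
		printf("fastpath=%d waitstart=%lld\n", fastpath,
			   (long long) waitStart);
}

int main(void)
{
	print_waitstart(1, 0);					/* fast path: no conflict, no wait */
	print_waitstart(0, 668700000000000);	/* regular lock with a recorded wait */
	return 0;
}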

src/backend/storage/lmgr/proc.c

@@ -402,7 +402,6 @@ InitProcess(void)
 	MyProc->lwWaitMode = 0;
 	MyProc->waitLock = NULL;
 	MyProc->waitProcLock = NULL;
-	pg_atomic_init_u64(&MyProc->waitStart, 0);
 #ifdef USE_ASSERT_CHECKING
 	{
 		int			i;
@@ -1263,23 +1262,6 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
 		}
 		else
 			enable_timeout_after(DEADLOCK_TIMEOUT, DeadlockTimeout);
-
-		/*
-		 * Use the current time obtained for the deadlock timeout timer as
-		 * waitStart (i.e., the time when this process started waiting for the
-		 * lock). Since getting the current time newly can cause overhead, we
-		 * reuse the already-obtained time to avoid that overhead.
-		 *
-		 * Note that waitStart is updated without holding the lock table's
-		 * partition lock, to avoid the overhead by additional lock
-		 * acquisition. This can cause "waitstart" in pg_locks to become NULL
-		 * for a very short period of time after the wait started even though
-		 * "granted" is false. This is OK in practice because we can assume
-		 * that users are likely to look at "waitstart" when waiting for the
-		 * lock for a long time.
-		 */
-		pg_atomic_write_u64(&MyProc->waitStart,
-							get_timeout_start_time(DEADLOCK_TIMEOUT));
 	}
 	else if (log_recovery_conflict_waits)
 	{
@@ -1696,7 +1678,6 @@ ProcWakeup(PGPROC *proc, ProcWaitStatus waitStatus)
 	proc->waitLock = NULL;
 	proc->waitProcLock = NULL;
 	proc->waitStatus = waitStatus;
-	pg_atomic_write_u64(&MyProc->waitStart, 0);
 
 	/* And awaken it */
 	SetLatch(&proc->procLatch);
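
Note: the removed ProcSleep() hunk reused the timestamp already taken for the deadlock timeout timer, via get_timeout_start_time(DEADLOCK_TIMEOUT), instead of reading the clock a second time. A small sketch of that idea, with a stand-in timer rather than PostgreSQL's timeout machinery (arm_deadlock_timer and deadlock_timer_start_time are illustrative names, not real PostgreSQL functions):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static _Atomic uint64_t wait_start;
static uint64_t deadlock_timer_start;	/* stand-in for the timeout module */

static uint64_t now_usec(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	return (uint64_t) ts.tv_sec * 1000000 + (uint64_t) ts.tv_nsec / 1000;
}

/* Stand-in for enable_timeout_after(): records when the timer was armed. */
static void arm_deadlock_timer(uint64_t delay_usec)
{
	deadlock_timer_start = now_usec();	/* the only clock read */
	(void) delay_usec;					/* a real timer would schedule here */
}

/* Stand-in for get_timeout_start_time(). */
static uint64_t deadlock_timer_start_time(void)
{
	return deadlock_timer_start;
}

int main(void)
{
	arm_deadlock_timer(1000 * 1000);	/* 1s deadlock_timeout */

	/* Reuse the timer's start time as the published wait start. */
	atomic_store(&wait_start, deadlock_timer_start_time());

	printf("wait started at %llu us\n",
		   (unsigned long long) atomic_load(&wait_start));
	return 0;
}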