Mirror of https://github.com/postgres/postgres.git
Restore sane locking behavior during parallel query.
Commit 9a3cebeaa changed things so that parallel workers didn't obtain any lock of their own on tables they access. That was clearly a bad idea, but I'd mistakenly supposed that it was the intended end result of the series of patches for simplifying the executor's lock management. Undo that change in relation_open(), and adjust ExecOpenScanRelation() so that it gets the correct lock if inside a parallel worker.

In passing, clean up some more obsolete comments about when locks are acquired.

Discussion: https://postgr.es/m/468c85d9-540e-66a2-1dde-fec2b741e688@lab.ntt.co.jp
parent f2343653f5
commit 29ef2b310d
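The heart of the fix is in ExecGetRangeTableRelation(): a normal backend still opens each range-table relation with NoLock, since the parser or planner already acquired rte->rellockmode, and merely Asserts that the lock is held; a parallel worker now passes rte->rellockmode to heap_open() so it acquires its own local lock. A condensed sketch of that branch, lifted from the diff below (surrounding executor context assumed, so this is not a standalone snippet):

    if (!IsParallelWorker())
    {
        /* Normal query: the lock was already taken upstream; verify it. */
        rel = heap_open(rte->relid, NoLock);
        Assert(rte->rellockmode == AccessShareLock ||
               CheckRelationLockedByMe(rel, rte->rellockmode, false));
    }
    else
    {
        /* Parallel worker: take our own local lock, so things stay sane
         * even if the leader exits before we do. */
        rel = heap_open(rte->relid, rte->rellockmode);
    }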
@@ -1140,13 +1140,9 @@ relation_open(Oid relationId, LOCKMODE lockmode)
 	/*
 	 * If we didn't get the lock ourselves, assert that caller holds one,
 	 * except in bootstrap mode where no locks are used.
-	 *
-	 * Also, parallel workers currently assume that their parent holds locks
-	 * for tables used in the parallel query (a mighty shaky assumption).
 	 */
 	Assert(lockmode != NoLock ||
 		   IsBootstrapProcessingMode() ||
-		   IsParallelWorker() ||
 		   CheckRelationLockedByMe(r, AccessShareLock, true));
 
 	/* Make note that we've accessed a temporary relation */
@@ -1622,8 +1622,8 @@ ExecEndPlan(PlanState *planstate, EState *estate)
 	}
 
 	/*
-	 * close whatever rangetable Relations have been opened.  We did not
-	 * acquire locks in ExecGetRangeTableRelation, so don't release 'em here.
+	 * close whatever rangetable Relations have been opened.  We do not
+	 * release any locks we might hold on those rels.
 	 */
 	num_relations = estate->es_range_table_size;
 	for (i = 0; i < num_relations; i++)
@@ -732,16 +732,30 @@ ExecGetRangeTableRelation(EState *estate, Index rti)
 
 		Assert(rte->rtekind == RTE_RELATION);
 
-		rel = estate->es_relations[rti - 1] = heap_open(rte->relid, NoLock);
-
-		/*
-		 * Verify that appropriate lock was obtained before execution.
-		 *
-		 * In the case of parallel query, only the leader would've obtained
-		 * the lock (that needs to be fixed, though).
-		 */
-		Assert(IsParallelWorker() ||
-			   CheckRelationLockedByMe(rel, rte->rellockmode, false));
+		if (!IsParallelWorker())
+		{
+			/*
+			 * In a normal query, we should already have the appropriate lock,
+			 * but verify that through an Assert.  Since there's already an
+			 * Assert inside heap_open that insists on holding some lock, it
+			 * seems sufficient to check this only when rellockmode is higher
+			 * than the minimum.
+			 */
+			rel = heap_open(rte->relid, NoLock);
+			Assert(rte->rellockmode == AccessShareLock ||
+				   CheckRelationLockedByMe(rel, rte->rellockmode, false));
+		}
+		else
+		{
+			/*
+			 * If we are a parallel worker, we need to obtain our own local
+			 * lock on the relation.  This ensures sane behavior in case the
+			 * parent process exits before we do.
+			 */
+			rel = heap_open(rte->relid, rte->rellockmode);
+		}
+
+		estate->es_relations[rti - 1] = rel;
 	}
 
 	return rel;
@@ -899,16 +899,12 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
 	ExecAssignExprContext(estate, &scanstate->ss.ps);
 
 	/*
-	 * open the base relation and acquire appropriate lock on it.
+	 * open the scan relation
 	 */
 	currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags);
 
 	/*
 	 * initialize child nodes
-	 *
-	 * We do this after ExecOpenScanRelation because the child nodes will open
-	 * indexscans on our relation's indexes, and we want to be sure we have
-	 * acquired a lock on the relation first.
 	 */
 	outerPlanState(scanstate) = ExecInitNode(outerPlan(node), estate, eflags);
 
@@ -55,7 +55,7 @@ ExecInitCustomScan(CustomScan *cscan, EState *estate, int eflags)
 	ExecAssignExprContext(estate, &css->ss.ps);
 
 	/*
-	 * open the base relation, if any, and acquire an appropriate lock on it
+	 * open the scan relation, if any
 	 */
 	if (scanrelid > 0)
 	{
@@ -156,8 +156,8 @@ ExecInitForeignScan(ForeignScan *node, EState *estate, int eflags)
 	ExecAssignExprContext(estate, &scanstate->ss.ps);
 
 	/*
-	 * open the base relation, if any, and acquire an appropriate lock on it;
-	 * also acquire function pointers from the FDW's handler
+	 * open the scan relation, if any; also acquire function pointers from the
+	 * FDW's handler
 	 */
 	if (scanrelid > 0)
 	{
@@ -511,7 +511,7 @@ ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags)
 	ExecAssignExprContext(estate, &indexstate->ss.ps);
 
 	/*
-	 * open the base relation and acquire appropriate lock on it.
+	 * open the scan relation
 	 */
 	currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags);
 
@@ -933,7 +933,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, int eflags)
 	ExecAssignExprContext(estate, &indexstate->ss.ps);
 
 	/*
-	 * open the base relation and acquire appropriate lock on it.
+	 * open the scan relation
 	 */
 	currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags);
 
@@ -134,10 +134,7 @@ ExecInitSampleScan(SampleScan *node, EState *estate, int eflags)
 	ExecAssignExprContext(estate, &scanstate->ss.ps);
 
 	/*
-	 * Initialize scan relation.
-	 *
-	 * Get the relation object id from the relid'th entry in the range table,
-	 * open that relation and acquire appropriate lock on it.
+	 * open the scan relation
 	 */
 	scanstate->ss.ss_currentRelation =
 		ExecOpenScanRelation(estate,
@@ -163,10 +163,7 @@ ExecInitSeqScan(SeqScan *node, EState *estate, int eflags)
 	ExecAssignExprContext(estate, &scanstate->ss.ps);
 
 	/*
-	 * Initialize scan relation.
-	 *
-	 * Get the relation object id from the relid'th entry in the range table,
-	 * open that relation and acquire appropriate lock on it.
+	 * open the scan relation
 	 */
 	scanstate->ss.ss_currentRelation =
 		ExecOpenScanRelation(estate,
@@ -531,7 +531,7 @@ ExecInitTidScan(TidScan *node, EState *estate, int eflags)
 	tidstate->tss_TidPtr = -1;
 
 	/*
-	 * open the base relation and acquire appropriate lock on it.
+	 * open the scan relation
 	 */
 	currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags);
 
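For context, ExecOpenScanRelation() itself does not appear in this diff: after the preceding refactoring commits it is a thin wrapper over ExecGetRangeTableRelation(), which is why fixing the latter is enough to give every scan node the correct lock inside a worker. A rough sketch of the wrapper (the exact body here is an assumption for illustration, not part of this patch):

    Relation
    ExecOpenScanRelation(EState *estate, Index scanrelid, int eflags)
    {
        /* Opens (and, in a parallel worker, now locks) the relation,
         * caching it in estate->es_relations[scanrelid - 1]. */
        Relation    rel = ExecGetRangeTableRelation(estate, scanrelid);

        /* (eflags-based check rejecting scans of unscannable rels elided) */
        return rel;
    }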