Don't be so trusting that shm_toc_lookup() will always succeed.
Given the possibility of race conditions and so on, it seems entirely unsafe to just assume that shm_toc_lookup() always finds the key it's looking for --- but that was exactly what all but one call site were doing. To fix, add a "bool noError" argument, similarly to what we have in many other functions, and throw an error on an unexpected lookup failure. Remove now-redundant Asserts that a rather random subset of call sites had.

I doubt this will throw any light on buildfarm member lorikeet's recent failures, because if an unnoticed lookup failure were involved, you'd kind of expect a null-pointer-dereference crash rather than the observed symptom. But you never know ... and this is better coding practice even if it never catches anything.

Discussion: https://postgr.es/m/9697.1496675981@sss.pgh.pa.us
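For context, the revised lookup routine in shm_toc.c has roughly the following shape. This is a minimal sketch reconstructed from the commit message, not the verbatim patch: the entry-scan details and the exact elog() message are illustrative.

void *
shm_toc_lookup(shm_toc *toc, uint64 key, bool noError)
{
	uint64		nentry;
	uint64		i;

	/* Read the number of entries before examining any entry. */
	nentry = toc->toc_nentry;
	pg_read_barrier();

	/* Search for a matching entry. */
	for (i = 0; i < nentry; ++i)
	{
		if (toc->toc_entry[i].key == key)
			return ((char *) toc) + toc->toc_entry[i].offset;
	}

	/* No match: previously every caller just got NULL back here. */
	if (!noError)
		elog(ERROR, "could not find key " UINT64_FORMAT " in shm TOC %p",
			 key, toc);
	return NULL;
}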
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -341,7 +341,7 @@ ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize)
 										 mul_size(PARALLEL_TUPLE_QUEUE_SIZE,
 												  pcxt->nworkers));
 	else
-		tqueuespace = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE);
+		tqueuespace = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE, false);
 
 	/* Create the queues, and become the receiver for each. */
 	for (i = 0; i < pcxt->nworkers; ++i)
@@ -684,7 +684,7 @@ ExecParallelGetReceiver(dsm_segment *seg, shm_toc *toc)
 	char	   *mqspace;
 	shm_mq	   *mq;
 
-	mqspace = shm_toc_lookup(toc, PARALLEL_KEY_TUPLE_QUEUE);
+	mqspace = shm_toc_lookup(toc, PARALLEL_KEY_TUPLE_QUEUE, false);
 	mqspace += ParallelWorkerNumber * PARALLEL_TUPLE_QUEUE_SIZE;
 	mq = (shm_mq *) mqspace;
 	shm_mq_set_sender(mq, MyProc);
@@ -705,14 +705,14 @@ ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver,
 	char	   *queryString;
 
 	/* Get the query string from shared memory */
-	queryString = shm_toc_lookup(toc, PARALLEL_KEY_QUERY_TEXT);
+	queryString = shm_toc_lookup(toc, PARALLEL_KEY_QUERY_TEXT, false);
 
 	/* Reconstruct leader-supplied PlannedStmt. */
-	pstmtspace = shm_toc_lookup(toc, PARALLEL_KEY_PLANNEDSTMT);
+	pstmtspace = shm_toc_lookup(toc, PARALLEL_KEY_PLANNEDSTMT, false);
 	pstmt = (PlannedStmt *) stringToNode(pstmtspace);
 
 	/* Reconstruct ParamListInfo. */
-	paramspace = shm_toc_lookup(toc, PARALLEL_KEY_PARAMS);
+	paramspace = shm_toc_lookup(toc, PARALLEL_KEY_PARAMS, false);
 	paramLI = RestoreParamList(&paramspace);
 
 	/*
@@ -843,7 +843,7 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
 
 	/* Set up DestReceiver, SharedExecutorInstrumentation, and QueryDesc. */
 	receiver = ExecParallelGetReceiver(seg, toc);
-	instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_INSTRUMENTATION);
+	instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_INSTRUMENTATION, true);
 	if (instrumentation != NULL)
 		instrument_options = instrumentation->instrument_options;
 	queryDesc = ExecParallelGetQueryDesc(toc, receiver, instrument_options);
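Note the asymmetry in this hunk: the leader only inserts the instrumentation entry when the query is actually being instrumented, so this is the one lookup where a miss is legitimate. It therefore passes noError = true and keeps its NULL check, while mandatory entries pass false and let shm_toc_lookup() raise the error itself. Condensed from the surrounding code:

	/* Optional TOC entry: absence is expected, so request NULL on a miss. */
	instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_INSTRUMENTATION, true);
	if (instrumentation != NULL)
		instrument_options = instrumentation->instrument_options;

	/* Mandatory TOC entry: a miss is a can't-happen condition, so error out. */
	area_space = shm_toc_lookup(toc, PARALLEL_KEY_DSA, false);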
@@ -858,7 +858,7 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
 	InstrStartParallelQuery();
 
 	/* Attach to the dynamic shared memory area. */
-	area_space = shm_toc_lookup(toc, PARALLEL_KEY_DSA);
+	area_space = shm_toc_lookup(toc, PARALLEL_KEY_DSA, false);
 	area = dsa_attach_in_place(area_space, seg);
 
 	/* Start up the executor */
@@ -875,7 +875,7 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
 	ExecutorFinish(queryDesc);
 
 	/* Report buffer usage during parallel execution. */
-	buffer_usage = shm_toc_lookup(toc, PARALLEL_KEY_BUFFER_USAGE);
+	buffer_usage = shm_toc_lookup(toc, PARALLEL_KEY_BUFFER_USAGE, false);
 	InstrEndParallelQuery(&buffer_usage[ParallelWorkerNumber]);
 
 	/* Report instrumentation data if any instrumentation options are set. */
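Taken together with the leader-side setup (not shown on this page), the protocol these hunks rely on is: the leader carves space out of the DSM segment and publishes it in the table of contents under a well-known key, and each worker looks that key up. A sketch of the round trip, with error handling and unrelated setup elided; the leader-side lines paraphrase ExecParallelSetupTupleQueues rather than quote it:

	/* Leader: allocate space in the segment and advertise it in the TOC. */
	char	   *space = shm_toc_allocate(pcxt->toc,
										 mul_size(PARALLEL_TUPLE_QUEUE_SIZE,
												  pcxt->nworkers));
	shm_toc_insert(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE, space);

	/*
	 * Worker: the entry is mandatory, so after this commit a missing key is
	 * reported by shm_toc_lookup() itself instead of returning NULL.
	 */
	char	   *tqueuespace = shm_toc_lookup(toc, PARALLEL_KEY_TUPLE_QUEUE, false);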