diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c
index 442a46140db..b0e89ace5e2 100644
--- a/src/backend/access/brin/brin.c
+++ b/src/backend/access/brin/brin.c
@@ -190,7 +190,8 @@ brininsert(Relation idxRel, Datum *values, bool *nulls,
 				AutoVacuumRequestWork(AVW_BRINSummarizeRange,
 									  RelationGetRelid(idxRel),
 									  lastPageRange);
-			brin_free_tuple(lastPageTuple);
+			else
+				LockBuffer(buf, BUFFER_LOCK_UNLOCK);
 		}
 
 		brtup = brinGetTupleForHeapBlock(revmap, heapBlk, &buf, &off,
diff --git a/src/backend/access/brin/brin_revmap.c b/src/backend/access/brin/brin_revmap.c
index fc8b10ab396..e778cbcacdc 100644
--- a/src/backend/access/brin/brin_revmap.c
+++ b/src/backend/access/brin/brin_revmap.c
@@ -179,13 +179,16 @@ brinSetHeapBlockItemptr(Buffer buf, BlockNumber pagesPerRange,
 /*
  * Fetch the BrinTuple for a given heap block.
  *
- * The buffer containing the tuple is locked, and returned in *buf. As an
- * optimization, the caller can pass a pinned buffer *buf on entry, which will
- * avoid a pin-unpin cycle when the next tuple is on the same page as a
- * previous one.
+ * The buffer containing the tuple is locked, and returned in *buf. The
+ * returned tuple points to the shared buffer and must not be freed; if caller
+ * wants to use it after releasing the buffer lock, it must create its own
+ * palloc'ed copy. As an optimization, the caller can pass a pinned buffer
+ * *buf on entry, which will avoid a pin-unpin cycle when the next tuple is on
+ * the same page as a previous one.
  *
  * If no tuple is found for the given heap range, returns NULL. In that case,
- * *buf might still be updated, but it's not locked.
+ * *buf might still be updated (and pin must be released by caller), but it's
+ * not locked.
  *
  * The output tuple offset within the buffer is returned in *off, and its size
  * is returned in *size.
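
For illustration only, not part of the patch: a minimal sketch of the caller
contract the updated comment describes. The returned BrinTuple points into a
shared buffer, so a caller that needs it after releasing the buffer lock must
copy it while the lock is still held, and must drop the pin itself on both the
found and not-found paths. The helper name fetch_brin_tuple_copy is
hypothetical, and the trailing BUFFER_LOCK_SHARE / NULL-snapshot arguments are
assumed to mirror the existing brinGetTupleForHeapBlock() call in this tree's
brin.c.

#include "postgres.h"
#include "access/brin_revmap.h"
#include "access/brin_tuple.h"
#include "storage/bufmgr.h"
#include "storage/off.h"

/* Hypothetical helper, sketching the contract in the updated comment */
static BrinTuple *
fetch_brin_tuple_copy(BrinRevmap *revmap, BlockNumber heapBlk)
{
	Buffer		buf = InvalidBuffer;
	OffsetNumber off;
	Size		size;
	BrinTuple  *tup;
	BrinTuple  *copy = NULL;

	tup = brinGetTupleForHeapBlock(revmap, heapBlk, &buf, &off, &size,
								   BUFFER_LOCK_SHARE, NULL);
	if (tup != NULL)
	{
		/*
		 * "tup" points into the shared buffer and must not be freed: make a
		 * palloc'ed copy before releasing the buffer lock.
		 */
		copy = palloc(size);
		memcpy(copy, tup, size);
		LockBuffer(buf, BUFFER_LOCK_UNLOCK);
	}

	/* Found or not, any pin acquired on our behalf is ours to release */
	if (BufferIsValid(buf))
		ReleaseBuffer(buf);

	return copy;				/* NULL if no summary tuple exists */
}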