Get rid of our dependency on type "long" for memory size calculations.
Consistently use "Size" (or size_t, or in some places int64 or double)
as the type for variables holding memory allocation sizes.

In most places variables' data types were fine already, but we had an
ancient habit of computing bytes from kilobytes-units GUCs with code
like "work_mem * 1024L". That risks overflow on Win64 where they did
not make "long" as wide as "size_t". We worked around that by
restricting such GUCs' ranges, so you couldn't set work_mem et al
higher than 2GB on Win64. This patch removes that restriction, after
replacing such calculations with "work_mem * (Size) 1024" or variants
of that.

It should be noted that this patch was constructed by searching
outwards from the GUCs that have MAX_KILOBYTES as upper limit. So I
can't positively guarantee there are no other places doing memory-size
arithmetic in int or long variables. I do however feel pretty confident
that increasing MAX_KILOBYTES on Win64 is safe now. Also, nothing in
our code should be dealing in multiple-gigabyte allocations without
authorization from a relevant GUC, so it seems pretty likely that this
search caught everything that could be at risk of overflow.

Author: Vladlen Popolitov <v.popolitov@postgrespro.ru>
Co-authored-by: Tom Lane <tgl@sss.pgh.pa.us>
Discussion: https://postgr.es/m/1a01f0-66ec2d80-3b-68487680@27595217
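To illustrate the hazard described above (a standalone sketch, not code
from the patch; work_mem_kb stands in for the work_mem GUC): on an
LLP64 platform such as Win64, "long" is 32 bits while "Size" (size_t)
is 64 bits, so the placement of the widening decides whether the
multiply happens in 32-bit or 64-bit arithmetic.

    #include <stddef.h>
    #include <stdio.h>

    typedef size_t Size;        /* mirrors PostgreSQL's Size typedef */

    int
    main(void)
    {
        int     work_mem_kb = 4 * 1024 * 1024;  /* a 4GB setting, in kB */

        /*
         * Old idiom: int * long multiplies in "long".  On Win64 long is
         * only 32 bits, so 4194304 * 1024 overflows before the widening
         * assignment.  (On LP64 platforms such as Linux, long is 64 bits
         * and this is fine, which is why the bug was Win64-specific.)
         */
        Size    bytes_old = work_mem_kb * 1024L;

        /*
         * New idiom: the cast widens an operand first, so the multiply
         * is carried out in Size and cannot overflow on any 64-bit
         * platform.
         */
        Size    bytes_new = work_mem_kb * (Size) 1024;

        printf("old: %zu, new: %zu\n", bytes_old, bytes_new);
        return 0;
    }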
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -39,7 +39,7 @@ int			gin_pending_list_limit = 0;
 
 #define GIN_PAGE_FREESIZE \
-	( BLCKSZ - MAXALIGN(SizeOfPageHeaderData) - MAXALIGN(sizeof(GinPageOpaqueData)) )
+	( (Size) BLCKSZ - MAXALIGN(SizeOfPageHeaderData) - MAXALIGN(sizeof(GinPageOpaqueData)) )
 
 typedef struct KeyArray
 {
@@ -456,7 +456,7 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
 	 * ginInsertCleanup() should not be called inside our CRIT_SECTION.
 	 */
 	cleanupSize = GinGetPendingListCleanupSize(index);
-	if (metadata->nPendingPages * GIN_PAGE_FREESIZE > cleanupSize * 1024L)
+	if (metadata->nPendingPages * GIN_PAGE_FREESIZE > cleanupSize * (Size) 1024)
 		needCleanup = true;
 
 	UnlockReleaseBuffer(metabuffer);
@@ -795,7 +795,7 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
 				blknoFinish;
 	bool		cleanupFinish = false;
 	bool		fsm_vac = false;
-	Size		workMemory;
+	int			workMemory;
 
 	/*
 	 * We would like to prevent concurrent cleanup process. For that we will
@@ -901,7 +901,7 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
 		 */
 		if (GinPageGetOpaque(page)->rightlink == InvalidBlockNumber ||
 			(GinPageHasFullRow(page) &&
-			 (accum.allocatedMemory >= workMemory * 1024L)))
+			 accum.allocatedMemory >= workMemory * (Size) 1024))
 		{
 			ItemPointerData *list;
 			uint32		nlist;
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -125,7 +125,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 	CompactAttribute *attr;
 
 	/* Initialize empty bitmap result */
-	scanEntry->matchBitmap = tbm_create(work_mem * 1024L, NULL);
+	scanEntry->matchBitmap = tbm_create(work_mem * (Size) 1024, NULL);
 
 	/* Null query cannot partial-match anything */
 	if (scanEntry->isPartialMatch &&
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -288,7 +288,7 @@ ginBuildCallback(Relation index, ItemPointer tid, Datum *values,
 										   values[i], isnull[i], tid);
 
 	/* If we've maxed out our available memory, dump everything to the index */
-	if (buildstate->accum.allocatedMemory >= (Size) maintenance_work_mem * 1024L)
+	if (buildstate->accum.allocatedMemory >= maintenance_work_mem * (Size) 1024)
 	{
 		ItemPointerData *list;
 		Datum		key;
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -120,7 +120,7 @@ hashbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 	double		reltuples;
 	double		allvisfrac;
 	uint32		num_buckets;
-	long		sort_threshold;
+	Size		sort_threshold;
 	HashBuildState buildstate;
 
 	/*
@@ -155,13 +155,13 @@ hashbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 	 * one page.  Also, "initial index size" accounting does not include the
 	 * metapage, nor the first bitmap page.
 	 */
-	sort_threshold = (maintenance_work_mem * 1024L) / BLCKSZ;
+	sort_threshold = (maintenance_work_mem * (Size) 1024) / BLCKSZ;
 	if (index->rd_rel->relpersistence != RELPERSISTENCE_TEMP)
 		sort_threshold = Min(sort_threshold, NBuffers);
 	else
 		sort_threshold = Min(sort_threshold, NLocBuffer);
 
-	if (num_buckets >= (uint32) sort_threshold)
+	if (num_buckets >= sort_threshold)
 		buildstate.spool = _h_spoolinit(heap, index, num_buckets);
 	else
 		buildstate.spool = NULL;
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -2070,7 +2070,7 @@ lazy_vacuum(LVRelState *vacrel)
 		 */
 		threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
 		bypass = (vacrel->lpdead_item_pages < threshold &&
-				  (TidStoreMemoryUsage(vacrel->dead_items) < (32L * 1024L * 1024L)));
+				  TidStoreMemoryUsage(vacrel->dead_items) < 32 * 1024 * 1024);
 	}
 
 	if (bypass)
@@ -3037,7 +3037,7 @@ dead_items_alloc(LVRelState *vacrel, int nworkers)
 	 */
 
 	dead_items_info = (VacDeadItemsInfo *) palloc(sizeof(VacDeadItemsInfo));
-	dead_items_info->max_bytes = vac_work_mem * 1024L;
+	dead_items_info->max_bytes = vac_work_mem * (Size) 1024;
 	dead_items_info->num_items = 0;
 	vacrel->dead_items_info = dead_items_info;
 
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -2953,7 +2953,7 @@ _bt_lock_subtree_parent(Relation rel, Relation heaprel, BlockNumber child,
 void
 _bt_pendingfsm_init(Relation rel, BTVacState *vstate, bool cleanuponly)
 {
-	int64		maxbufsize;
+	Size		maxbufsize;
 
 	/*
 	 * Don't bother with optimization in cleanup-only case -- we don't expect
@@ -2969,12 +2969,13 @@ _bt_pendingfsm_init(Relation rel, BTVacState *vstate, bool cleanuponly)
 	 * int overflow here.
 	 */
 	vstate->bufsize = 256;
-	maxbufsize = (work_mem * 1024L) / sizeof(BTPendingFSM);
-	maxbufsize = Min(maxbufsize, INT_MAX);
+	maxbufsize = (work_mem * (Size) 1024) / sizeof(BTPendingFSM);
 	maxbufsize = Min(maxbufsize, MaxAllocSize / sizeof(BTPendingFSM));
+	/* BTVacState.maxbufsize has type int */
+	maxbufsize = Min(maxbufsize, INT_MAX);
 	/* Stay sane with small work_mem */
 	maxbufsize = Max(maxbufsize, vstate->bufsize);
-	vstate->maxbufsize = maxbufsize;
+	vstate->maxbufsize = (int) maxbufsize;
 
 	/* Allocate buffer, indicate that there are currently 0 pending pages */
 	vstate->pendingpages = palloc(sizeof(BTPendingFSM) * vstate->bufsize);
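A closing note on the pattern used throughout the patch (my sketch, not
committed code): the cast must widen an operand before the
multiplication; casting the result afterward recovers nothing, because
on an LLP64 platform the overflow has already happened in 32-bit
arithmetic.

    #include <stddef.h>

    typedef size_t Size;

    /* Correct: the (Size) cast promotes the arithmetic to 64 bits
     * before the multiply happens, as the patch does everywhere. */
    static Size
    kb_to_bytes(int kb)
    {
        return kb * (Size) 1024;
    }

    /* Broken on LLP64: kb * 1024 is evaluated as 32-bit int first;
     * the cast then merely widens the already-wrapped result. */
    static Size
    kb_to_bytes_broken(int kb)
    {
        return (Size) (kb * 1024);
    }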