
Assorted examples of expanded type-safer palloc/pg_malloc API

This adds some uses of the new palloc/pg_malloc variants here and
there as a demonstration and test.  This is kept separate from the
actual API patch, since the latter might be backpatched at some point.

Reviewed-by: Tom Lane <tgl@sss.pgh.pa.us>
Discussion: https://www.postgresql.org/message-id/flat/bb755632-2a43-d523-36f8-a1e7a389a907@enterprisedb.com
Peter Eisentraut 2022-09-12 08:31:56 +02:00
parent 2016055a92
commit 5015e1e1b5
12 changed files with 90 additions and 111 deletions
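
For context, the hunks below assume the type-safe allocation wrappers introduced by the separate API patch referenced above. The following is only a minimal sketch of those wrappers, written from the pattern visible in this diff rather than copied from the API patch, so the exact set of variants and their formatting may differ:

/* Backend wrappers: allocate by type, return a correctly typed pointer. */
#define palloc_object(type)         ((type *) palloc(sizeof(type)))
#define palloc0_object(type)        ((type *) palloc0(sizeof(type)))
#define palloc_array(type, count)   ((type *) palloc(sizeof(type) * (count)))
#define palloc0_array(type, count)  ((type *) palloc0(sizeof(type) * (count)))
#define repalloc_array(pointer, type, count) \
    ((type *) repalloc(pointer, sizeof(type) * (count)))

/* Frontend (pg_malloc) counterparts follow the same pattern. */
#define pg_malloc_object(type)        ((type *) pg_malloc(sizeof(type)))
#define pg_malloc0_object(type)       ((type *) pg_malloc0(sizeof(type)))
#define pg_malloc_array(type, count)  ((type *) pg_malloc(sizeof(type) * (count)))
#define pg_realloc_array(pointer, type, count) \
    ((type *) pg_realloc(pointer, sizeof(type) * (count)))

/*
 * Typical conversion, as in the hunks below:
 *   before:  values = (char **) palloc(nfields * sizeof(char *));
 *   after:   values = palloc_array(char *, nfields);
 * The macro supplies both the sizeof() and the cast, so the element type is
 * written once and mismatches with the target pointer are easier for the
 * compiler to flag.
 */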


@@ -972,7 +972,7 @@ materializeResult(FunctionCallInfo fcinfo, PGconn *conn, PGresult *res)
     rsinfo->setDesc = tupdesc;
     MemoryContextSwitchTo(oldcontext);
-    values = (char **) palloc(nfields * sizeof(char *));
+    values = palloc_array(char *, nfields);
     /* put all tuples into the tuplestore */
     for (row = 0; row < ntuples; row++)
@@ -1276,7 +1276,7 @@ storeRow(volatile storeInfo *sinfo, PGresult *res, bool first)
     */
    if (sinfo->cstrs)
        pfree(sinfo->cstrs);
-   sinfo->cstrs = (char **) palloc(nfields * sizeof(char *));
+   sinfo->cstrs = palloc_array(char *, nfields);
 }
 /* Should have a single-row result if we get here */
@@ -1618,7 +1618,7 @@ dblink_get_pkey(PG_FUNCTION_ARGS)
     HeapTuple   tuple;
     Datum       result;
-    values = (char **) palloc(2 * sizeof(char *));
+    values = palloc_array(char *, 2);
     values[0] = psprintf("%d", call_cntr + 1);
     values[1] = results[call_cntr];
@@ -2083,7 +2083,7 @@ get_pkey_attnames(Relation rel, int16 *indnkeyatts)
 *indnkeyatts = index->indnkeyatts;
 if (*indnkeyatts > 0)
 {
-    result = (char **) palloc(*indnkeyatts * sizeof(char *));
+    result = palloc_array(char *, *indnkeyatts);
     for (i = 0; i < *indnkeyatts; i++)
         result[i] = SPI_fname(tupdesc, index->indkey.values[i]);
@@ -2124,7 +2124,7 @@ get_text_array_contents(ArrayType *array, int *numitems)
 get_typlenbyvalalign(ARR_ELEMTYPE(array),
                      &typlen, &typbyval, &typalign);
-values = (char **) palloc(nitems * sizeof(char *));
+values = palloc_array(char *, nitems);
 ptr = ARR_DATA_PTR(array);
 bitmap = ARR_NULLBITMAP(array);
@@ -2928,7 +2928,7 @@ validate_pkattnums(Relation rel,
              errmsg("number of key attributes must be > 0")));
 /* Allocate output array */
-*pkattnums = (int *) palloc(pknumatts_arg * sizeof(int));
+*pkattnums = palloc_array(int, pknumatts_arg);
 *pknumatts = pknumatts_arg;
 /* Validate attnums and convert to internal form */


@@ -329,7 +329,7 @@ brinbeginscan(Relation r, int nkeys, int norderbys)
 scan = RelationGetIndexScan(r, nkeys, norderbys);
-opaque = (BrinOpaque *) palloc(sizeof(BrinOpaque));
+opaque = palloc_object(BrinOpaque);
 opaque->bo_rmAccess = brinRevmapInitialize(r, &opaque->bo_pagesPerRange,
                                            scan->xs_snapshot);
 opaque->bo_bdesc = brin_build_desc(r);
@@ -394,7 +394,7 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
  * don't look them up here; we do that lazily the first time we see a scan
  * key reference each of them.  We rely on zeroing fn_oid to InvalidOid.
  */
-consistentFn = palloc0(sizeof(FmgrInfo) * bdesc->bd_tupdesc->natts);
+consistentFn = palloc0_array(FmgrInfo, bdesc->bd_tupdesc->natts);
 /*
  * Make room for per-attribute lists of scan keys that we'll pass to the
@@ -881,7 +881,7 @@ brinbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 /*
  * Return statistics
  */
-result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
+result = palloc_object(IndexBuildResult);
 result->heap_tuples = reltuples;
 result->index_tuples = idxtuples;
@@ -925,7 +925,7 @@ brinbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 {
     /* allocate stats if first time through, else re-use existing struct */
     if (stats == NULL)
-        stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
+        stats = palloc0_object(IndexBulkDeleteResult);
     return stats;
 }
@@ -944,7 +944,7 @@ brinvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
     return stats;
 if (!stats)
-    stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
+    stats = palloc0_object(IndexBulkDeleteResult);
 stats->num_pages = RelationGetNumberOfBlocks(info->index);
 /* rest of stats is initialized by zeroing */
@@ -1204,7 +1204,7 @@ brin_build_desc(Relation rel)
  * Obtain BrinOpcInfo for each indexed column.  While at it, accumulate
  * the number of columns stored, since the number is opclass-defined.
  */
-opcinfo = (BrinOpcInfo **) palloc(sizeof(BrinOpcInfo *) * tupdesc->natts);
+opcinfo = palloc_array(BrinOpcInfo *, tupdesc->natts);
 for (keyno = 0; keyno < tupdesc->natts; keyno++)
 {
     FmgrInfo   *opcInfoFn;
@@ -1276,7 +1276,7 @@ initialize_brin_buildstate(Relation idxRel, BrinRevmap *revmap,
 {
     BrinBuildState *state;
-    state = palloc(sizeof(BrinBuildState));
+    state = palloc_object(BrinBuildState);
     state->bs_irel = idxRel;
     state->bs_numtuples = 0;


@@ -505,7 +505,7 @@ ginHeapTupleFastCollect(GinState *ginstate,
      * resizing (since palloc likes powers of 2).
      */
     collector->lentuples = pg_nextpower2_32(Max(16, nentries));
-    collector->tuples = (IndexTuple *) palloc(sizeof(IndexTuple) * collector->lentuples);
+    collector->tuples = palloc_array(IndexTuple, collector->lentuples);
 }
 else if (collector->lentuples < collector->ntuples + nentries)
 {
@@ -515,8 +515,8 @@ ginHeapTupleFastCollect(GinState *ginstate,
      * MaxAllocSize/sizeof(IndexTuple), causing an error in repalloc.
      */
     collector->lentuples = pg_nextpower2_32(collector->ntuples + nentries);
-    collector->tuples = (IndexTuple *) repalloc(collector->tuples,
-                                                sizeof(IndexTuple) * collector->lentuples);
+    collector->tuples = repalloc_array(collector->tuples,
+                                       IndexTuple, collector->lentuples);
 }
 /*
@@ -665,9 +665,8 @@ shiftList(Relation index, Buffer metabuffer, BlockNumber newHead,
 static void
 initKeyArray(KeyArray *keys, int32 maxvalues)
 {
-    keys->keys = (Datum *) palloc(sizeof(Datum) * maxvalues);
-    keys->categories = (GinNullCategory *)
-        palloc(sizeof(GinNullCategory) * maxvalues);
+    keys->keys = palloc_array(Datum, maxvalues);
+    keys->categories = palloc_array(GinNullCategory, maxvalues);
     keys->nvalues = 0;
     keys->maxvalues = maxvalues;
 }
@@ -679,10 +678,8 @@ addDatum(KeyArray *keys, Datum datum, GinNullCategory category)
 if (keys->nvalues >= keys->maxvalues)
 {
     keys->maxvalues *= 2;
-    keys->keys = (Datum *)
-        repalloc(keys->keys, sizeof(Datum) * keys->maxvalues);
-    keys->categories = (GinNullCategory *)
-        repalloc(keys->categories, sizeof(GinNullCategory) * keys->maxvalues);
+    keys->keys = repalloc_array(keys->keys, Datum, keys->maxvalues);
+    keys->categories = repalloc_array(keys->categories, GinNullCategory, keys->maxvalues);
 }
 keys->keys[keys->nvalues] = datum;


@@ -229,10 +229,10 @@ CheckIndexCompatible(Oid oldId,
  */
 indexInfo = makeIndexInfo(numberOfAttributes, numberOfAttributes,
                           accessMethodId, NIL, NIL, false, false, false, false);
-typeObjectId = (Oid *) palloc(numberOfAttributes * sizeof(Oid));
-collationObjectId = (Oid *) palloc(numberOfAttributes * sizeof(Oid));
-classObjectId = (Oid *) palloc(numberOfAttributes * sizeof(Oid));
-coloptions = (int16 *) palloc(numberOfAttributes * sizeof(int16));
+typeObjectId = palloc_array(Oid, numberOfAttributes);
+collationObjectId = palloc_array(Oid, numberOfAttributes);
+classObjectId = palloc_array(Oid, numberOfAttributes);
+coloptions = palloc_array(int16, numberOfAttributes);
 ComputeIndexAttrs(indexInfo,
                   typeObjectId, collationObjectId, classObjectId,
                   coloptions, attributeList,
@@ -895,10 +895,10 @@ DefineIndex(Oid relationId,
                                 !concurrent,
                                 concurrent);
-typeObjectId = (Oid *) palloc(numberOfAttributes * sizeof(Oid));
-collationObjectId = (Oid *) palloc(numberOfAttributes * sizeof(Oid));
-classObjectId = (Oid *) palloc(numberOfAttributes * sizeof(Oid));
-coloptions = (int16 *) palloc(numberOfAttributes * sizeof(int16));
+typeObjectId = palloc_array(Oid, numberOfAttributes);
+collationObjectId = palloc_array(Oid, numberOfAttributes);
+classObjectId = palloc_array(Oid, numberOfAttributes);
+coloptions = palloc_array(int16, numberOfAttributes);
 ComputeIndexAttrs(indexInfo,
                   typeObjectId, collationObjectId, classObjectId,
                   coloptions, allIndexParams,
@@ -1210,7 +1210,7 @@ DefineIndex(Oid relationId,
 if ((!stmt->relation || stmt->relation->inh) && partdesc->nparts > 0)
 {
     int         nparts = partdesc->nparts;
-    Oid        *part_oids = palloc(sizeof(Oid) * nparts);
+    Oid        *part_oids = palloc_array(Oid, nparts);
     bool        invalidate_parent = false;
     Relation    parentIndex;
     TupleDesc   parentDesc;
@@ -1786,9 +1786,9 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
 if (exclusionOpNames)
 {
     Assert(list_length(exclusionOpNames) == nkeycols);
-    indexInfo->ii_ExclusionOps = (Oid *) palloc(sizeof(Oid) * nkeycols);
-    indexInfo->ii_ExclusionProcs = (Oid *) palloc(sizeof(Oid) * nkeycols);
-    indexInfo->ii_ExclusionStrats = (uint16 *) palloc(sizeof(uint16) * nkeycols);
+    indexInfo->ii_ExclusionOps = palloc_array(Oid, nkeycols);
+    indexInfo->ii_ExclusionProcs = palloc_array(Oid, nkeycols);
+    indexInfo->ii_ExclusionStrats = palloc_array(uint16, nkeycols);
     nextExclOp = list_head(exclusionOpNames);
 }
 else
@@ -2112,7 +2112,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
 if (!indexInfo->ii_OpclassOptions)
     indexInfo->ii_OpclassOptions =
-        palloc0(sizeof(Datum) * indexInfo->ii_NumIndexAttrs);
+        palloc0_array(Datum, indexInfo->ii_NumIndexAttrs);
 indexInfo->ii_OpclassOptions[attn] =
     transformRelOptions((Datum) 0, attribute->opclassopts,
@@ -3459,7 +3459,7 @@ ReindexRelationConcurrently(Oid relationOid, ReindexParams *params)
 /* Save the list of relation OIDs in private context */
 oldcontext = MemoryContextSwitchTo(private_context);
-idx = palloc(sizeof(ReindexIndexInfo));
+idx = palloc_object(ReindexIndexInfo);
 idx->indexId = cellOid;
 /* other fields set later */
@@ -3508,7 +3508,7 @@ ReindexRelationConcurrently(Oid relationOid, ReindexParams *params)
  */
 oldcontext = MemoryContextSwitchTo(private_context);
-idx = palloc(sizeof(ReindexIndexInfo));
+idx = palloc_object(ReindexIndexInfo);
 idx->indexId = cellOid;
 indexIds = lappend(indexIds, idx);
 /* other fields set later */
@@ -3589,7 +3589,7 @@ ReindexRelationConcurrently(Oid relationOid, ReindexParams *params)
  * Save the list of relation OIDs in private context.  Note
  * that invalid indexes are allowed here.
  */
-idx = palloc(sizeof(ReindexIndexInfo));
+idx = palloc_object(ReindexIndexInfo);
 idx->indexId = relationOid;
 indexIds = lappend(indexIds, idx);
 /* other fields set later */
@@ -3734,7 +3734,7 @@ ReindexRelationConcurrently(Oid relationOid, ReindexParams *params)
  */
 oldcontext = MemoryContextSwitchTo(private_context);
-newidx = palloc(sizeof(ReindexIndexInfo));
+newidx = palloc_object(ReindexIndexInfo);
 newidx->indexId = newIndexId;
 newidx->safe = idx->safe;
 newidx->tableId = idx->tableId;
@@ -3748,10 +3748,10 @@ ReindexRelationConcurrently(Oid relationOid, ReindexParams *params)
  * avoid multiple locks taken on the same relation, instead we rely on
  * parentRelationIds built earlier.
  */
-lockrelid = palloc(sizeof(*lockrelid));
+lockrelid = palloc_object(LockRelId);
 *lockrelid = indexRel->rd_lockInfo.lockRelId;
 relationLocks = lappend(relationLocks, lockrelid);
-lockrelid = palloc(sizeof(*lockrelid));
+lockrelid = palloc_object(LockRelId);
 *lockrelid = newIndexRel->rd_lockInfo.lockRelId;
 relationLocks = lappend(relationLocks, lockrelid);
@@ -3783,11 +3783,11 @@ ReindexRelationConcurrently(Oid relationOid, ReindexParams *params)
 oldcontext = MemoryContextSwitchTo(private_context);
 /* Add lockrelid of heap relation to the list of locked relations */
-lockrelid = palloc(sizeof(*lockrelid));
+lockrelid = palloc_object(LockRelId);
 *lockrelid = heapRelation->rd_lockInfo.lockRelId;
 relationLocks = lappend(relationLocks, lockrelid);
-heaplocktag = (LOCKTAG *) palloc(sizeof(LOCKTAG));
+heaplocktag = palloc_object(LOCKTAG);
 /* Save the LOCKTAG for this parent relation for the wait phase */
 SET_LOCKTAG_RELATION(*heaplocktag, lockrelid->dbId, lockrelid->relId);


@@ -98,7 +98,7 @@ PrepareQuery(ParseState *pstate, PrepareStmt *stmt,
     int         i;
     ListCell   *l;
-    argtypes = (Oid *) palloc(nargs * sizeof(Oid));
+    argtypes = palloc_array(Oid, nargs);
     i = 0;
     foreach(l, stmt->argtypes)
@@ -698,7 +698,7 @@ pg_prepared_statement(PG_FUNCTION_ARGS)
 {
     Oid        *result_types;
-    result_types = (Oid *) palloc(result_desc->natts * sizeof(Oid));
+    result_types = palloc_array(Oid, result_desc->natts);
     for (int i = 0; i < result_desc->natts; i++)
         result_types[i] = result_desc->attrs[i].atttypid;
     values[4] = build_regtype_array(result_types, result_desc->natts);
@@ -732,7 +732,7 @@ build_regtype_array(Oid *param_types, int num_params)
 ArrayType  *result;
 int         i;
-tmp_ary = (Datum *) palloc(num_params * sizeof(Datum));
+tmp_ary = palloc_array(Datum, num_params);
 for (i = 0; i < num_params; i++)
     tmp_ary[i] = ObjectIdGetDatum(param_types[i]);


@@ -479,7 +479,7 @@ ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations,
  * per-query memory context.  Everything else should be kept inside the
  * subsidiary hashCxt or batchCxt.
  */
-hashtable = (HashJoinTable) palloc(sizeof(HashJoinTableData));
+hashtable = palloc_object(HashJoinTableData);
 hashtable->nbuckets = nbuckets;
 hashtable->nbuckets_original = nbuckets;
 hashtable->nbuckets_optimal = nbuckets;
@@ -540,12 +540,10 @@ ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations,
  * remember whether the join operators are strict.
  */
 nkeys = list_length(hashOperators);
-hashtable->outer_hashfunctions =
-    (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
-hashtable->inner_hashfunctions =
-    (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
-hashtable->hashStrict = (bool *) palloc(nkeys * sizeof(bool));
-hashtable->collations = (Oid *) palloc(nkeys * sizeof(Oid));
+hashtable->outer_hashfunctions = palloc_array(FmgrInfo, nkeys);
+hashtable->inner_hashfunctions = palloc_array(FmgrInfo, nkeys);
+hashtable->hashStrict = palloc_array(bool, nkeys);
+hashtable->collations = palloc_array(Oid, nkeys);
 i = 0;
 forboth(ho, hashOperators, hc, hashCollations)
 {
@@ -569,10 +567,8 @@ ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations,
  * allocate and initialize the file arrays in hashCxt (not needed for
  * parallel case which uses shared tuplestores instead of raw files)
  */
-hashtable->innerBatchFile = (BufFile **)
-    palloc0(nbatch * sizeof(BufFile *));
-hashtable->outerBatchFile = (BufFile **)
-    palloc0(nbatch * sizeof(BufFile *));
+hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
+hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
 /* The files will not be opened until needed... */
 /* ... but make sure we have temp tablespaces established for them */
 PrepareTempTablespaces();
@@ -636,8 +632,7 @@ ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations,
  */
 MemoryContextSwitchTo(hashtable->batchCxt);
-hashtable->buckets.unshared = (HashJoinTuple *)
-    palloc0(nbuckets * sizeof(HashJoinTuple));
+hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
 /*
  * Set up for skew optimization, if possible and there's a need for
@@ -937,20 +932,16 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
 if (hashtable->innerBatchFile == NULL)
 {
     /* we had no file arrays before */
-    hashtable->innerBatchFile = (BufFile **)
-        palloc0(nbatch * sizeof(BufFile *));
-    hashtable->outerBatchFile = (BufFile **)
-        palloc0(nbatch * sizeof(BufFile *));
+    hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
+    hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
     /* time to establish the temp tablespaces, too */
     PrepareTempTablespaces();
 }
 else
 {
     /* enlarge arrays and zero out added entries */
-    hashtable->innerBatchFile = (BufFile **)
-        repalloc(hashtable->innerBatchFile, nbatch * sizeof(BufFile *));
-    hashtable->outerBatchFile = (BufFile **)
-        repalloc(hashtable->outerBatchFile, nbatch * sizeof(BufFile *));
+    hashtable->innerBatchFile = repalloc_array(hashtable->innerBatchFile, BufFile *, nbatch);
+    hashtable->outerBatchFile = repalloc_array(hashtable->outerBatchFile, BufFile *, nbatch);
     MemSet(hashtable->innerBatchFile + oldnbatch, 0,
            (nbatch - oldnbatch) * sizeof(BufFile *));
     MemSet(hashtable->outerBatchFile + oldnbatch, 0,
@@ -977,8 +968,8 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
     hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
     hashtable->buckets.unshared =
-        repalloc(hashtable->buckets.unshared,
-                 sizeof(HashJoinTuple) * hashtable->nbuckets);
+        repalloc_array(hashtable->buckets.unshared,
+                       HashJoinTuple, hashtable->nbuckets);
 }
 /*
@@ -1371,7 +1362,7 @@ ExecParallelHashRepartitionRest(HashJoinTable hashtable)
 /* Get our hands on the previous generation of batches. */
 old_batches = (ParallelHashJoinBatch *)
     dsa_get_address(hashtable->area, pstate->old_batches);
-old_inner_tuples = palloc0(sizeof(SharedTuplestoreAccessor *) * old_nbatch);
+old_inner_tuples = palloc0_array(SharedTuplestoreAccessor *, old_nbatch);
 for (i = 1; i < old_nbatch; ++i)
 {
     ParallelHashJoinBatch *shared =
@@ -1477,8 +1468,8 @@ ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
  * chunks)
  */
 hashtable->buckets.unshared =
-    (HashJoinTuple *) repalloc(hashtable->buckets.unshared,
-                               hashtable->nbuckets * sizeof(HashJoinTuple));
+    repalloc_array(hashtable->buckets.unshared,
+                   HashJoinTuple, hashtable->nbuckets);
 memset(hashtable->buckets.unshared, 0,
        hashtable->nbuckets * sizeof(HashJoinTuple));
@@ -2170,8 +2161,7 @@ ExecHashTableReset(HashJoinTable hashtable)
 oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
 /* Reallocate and reinitialize the hash bucket headers. */
-hashtable->buckets.unshared = (HashJoinTuple *)
-    palloc0(nbuckets * sizeof(HashJoinTuple));
+hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
 hashtable->spaceUsed = 0;
@@ -2666,8 +2656,7 @@ ExecShutdownHash(HashState *node)
 {
     /* Allocate save space if EXPLAIN'ing and we didn't do so already */
     if (node->ps.instrument && !node->hinstrument)
-        node->hinstrument = (HashInstrumentation *)
-            palloc0(sizeof(HashInstrumentation));
+        node->hinstrument = palloc0_object(HashInstrumentation);
     /* Now accumulate data for the current (final) hash table */
     if (node->hinstrument && node->hashtable)
         ExecHashAccumInstrumentation(node->hinstrument, node->hashtable);
@@ -2977,8 +2966,8 @@ ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
 /* Allocate this backend's accessor array. */
 hashtable->nbatch = nbatch;
-hashtable->batches = (ParallelHashJoinBatchAccessor *)
-    palloc0(sizeof(ParallelHashJoinBatchAccessor) * hashtable->nbatch);
+hashtable->batches =
+    palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
 /* Set up the shared state, tuplestores and backend-local accessors. */
 for (i = 0; i < hashtable->nbatch; ++i)
@@ -3083,8 +3072,8 @@ ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
 /* Allocate this backend's accessor array. */
 hashtable->nbatch = pstate->nbatch;
-hashtable->batches = (ParallelHashJoinBatchAccessor *)
-    palloc0(sizeof(ParallelHashJoinBatchAccessor) * hashtable->nbatch);
+hashtable->batches =
+    palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
 /* Find the base of the pseudo-array of ParallelHashJoinBatch objects. */
 batches = (ParallelHashJoinBatch *)


@@ -1660,7 +1660,7 @@ exec_bind_message(StringInfo input_message)
 numPFormats = pq_getmsgint(input_message, 2);
 if (numPFormats > 0)
 {
-    pformats = (int16 *) palloc(numPFormats * sizeof(int16));
+    pformats = palloc_array(int16, numPFormats);
     for (int i = 0; i < numPFormats; i++)
         pformats[i] = pq_getmsgint(input_message, 2);
 }
@@ -1848,8 +1848,7 @@ exec_bind_message(StringInfo input_message)
 oldcxt = MemoryContextSwitchTo(MessageContext);
 if (knownTextValues == NULL)
-    knownTextValues =
-        palloc0(numParams * sizeof(char *));
+    knownTextValues = palloc0_array(char *, numParams);
 if (log_parameter_max_length_on_error < 0)
     knownTextValues[paramno] = pstrdup(pstring);
@@ -1958,7 +1957,7 @@ exec_bind_message(StringInfo input_message)
 numRFormats = pq_getmsgint(input_message, 2);
 if (numRFormats > 0)
 {
-    rformats = (int16 *) palloc(numRFormats * sizeof(int16));
+    rformats = palloc_array(int16, numRFormats);
     for (int i = 0; i < numRFormats; i++)
         rformats[i] = pq_getmsgint(input_message, 2);
 }
@@ -4517,7 +4516,7 @@ PostgresMain(const char *dbname, const char *username)
 numParams = pq_getmsgint(&input_message, 2);
 if (numParams > 0)
 {
-    paramTypes = (Oid *) palloc(numParams * sizeof(Oid));
+    paramTypes = palloc_array(Oid, numParams);
     for (int i = 0; i < numParams; i++)
        paramTypes[i] = pq_getmsgint(&input_message, 4);
 }


@@ -400,7 +400,7 @@ flagInhIndexes(Archive *fout, TableInfo tblinfo[], int numTables)
 if (parentidx == NULL)
     continue;
-attachinfo = (IndexAttachInfo *) pg_malloc(sizeof(IndexAttachInfo));
+attachinfo = pg_malloc_object(IndexAttachInfo);
 attachinfo->dobj.objType = DO_INDEX_ATTACH;
 attachinfo->dobj.catId.tableoid = 0;
@@ -530,7 +530,7 @@ flagInhAttrs(DumpOptions *dopt, TableInfo *tblinfo, int numTables)
 {
     AttrDefInfo *attrDef;
-    attrDef = (AttrDefInfo *) pg_malloc(sizeof(AttrDefInfo));
+    attrDef = pg_malloc_object(AttrDefInfo);
     attrDef->dobj.objType = DO_ATTRDEF;
     attrDef->dobj.catId.tableoid = 0;
     attrDef->dobj.catId.oid = 0;
@@ -600,14 +600,12 @@ AssignDumpId(DumpableObject *dobj)
 if (allocedDumpIds <= 0)
 {
     newAlloc = 256;
-    dumpIdMap = (DumpableObject **)
-        pg_malloc(newAlloc * sizeof(DumpableObject *));
+    dumpIdMap = pg_malloc_array(DumpableObject *, newAlloc);
 }
 else
 {
     newAlloc = allocedDumpIds * 2;
-    dumpIdMap = (DumpableObject **)
-        pg_realloc(dumpIdMap, newAlloc * sizeof(DumpableObject *));
+    dumpIdMap = pg_realloc_array(dumpIdMap, DumpableObject *, newAlloc);
 }
 memset(dumpIdMap + allocedDumpIds, 0,
        (newAlloc - allocedDumpIds) * sizeof(DumpableObject *));
@@ -700,8 +698,7 @@ getDumpableObjects(DumpableObject ***objs, int *numObjs)
 int         i,
             j;
-*objs = (DumpableObject **)
-    pg_malloc(allocedDumpIds * sizeof(DumpableObject *));
+*objs = pg_malloc_array(DumpableObject *, allocedDumpIds);
 j = 0;
 for (i = 1; i < allocedDumpIds; i++)
 {
@@ -724,15 +721,13 @@ addObjectDependency(DumpableObject *dobj, DumpId refId)
 if (dobj->allocDeps <= 0)
 {
     dobj->allocDeps = 16;
-    dobj->dependencies = (DumpId *)
-        pg_malloc(dobj->allocDeps * sizeof(DumpId));
+    dobj->dependencies = pg_malloc_array(DumpId, dobj->allocDeps);
 }
 else
 {
     dobj->allocDeps *= 2;
-    dobj->dependencies = (DumpId *)
-        pg_realloc(dobj->dependencies,
-                   dobj->allocDeps * sizeof(DumpId));
+    dobj->dependencies = pg_realloc_array(dobj->dependencies,
+                                          DumpId, dobj->allocDeps);
 }
 }
 dobj->dependencies[dobj->nDeps++] = refId;
@@ -990,8 +985,7 @@ findParentsByOid(TableInfo *self,
 if (numParents > 0)
 {
-    self->parents = (TableInfo **)
-        pg_malloc(sizeof(TableInfo *) * numParents);
+    self->parents = pg_malloc_array(TableInfo *, numParents);
     j = 0;
     for (i = 0; i < numInherits; i++)
     {


@@ -151,7 +151,7 @@ InitArchiveFmt_Tar(ArchiveHandle *AH)
 /*
  * Set up some special context used in compressing data.
  */
-ctx = (lclContext *) pg_malloc0(sizeof(lclContext));
+ctx = pg_malloc0_object(lclContext);
 AH->formatData = (void *) ctx;
 ctx->filePos = 0;
 ctx->isSpecialScript = 0;
@@ -240,7 +240,7 @@ _ArchiveEntry(ArchiveHandle *AH, TocEntry *te)
 lclTocEntry *ctx;
 char        fn[K_STD_BUF_SIZE];
-ctx = (lclTocEntry *) pg_malloc0(sizeof(lclTocEntry));
+ctx = pg_malloc0_object(lclTocEntry);
 if (te->dataDumper != NULL)
 {
     snprintf(fn, sizeof(fn), "%d.dat", te->dumpId);
@@ -272,7 +272,7 @@ _ReadExtraToc(ArchiveHandle *AH, TocEntry *te)
 if (ctx == NULL)
 {
-    ctx = (lclTocEntry *) pg_malloc0(sizeof(lclTocEntry));
+    ctx = pg_malloc0_object(lclTocEntry);
     te->formatData = (void *) ctx;
 }
@@ -337,7 +337,7 @@ tarOpen(ArchiveHandle *AH, const char *filename, char mode)
 {
     int         old_umask;
-    tm = pg_malloc0(sizeof(TAR_MEMBER));
+    tm = pg_malloc0_object(TAR_MEMBER);
     /*
      * POSIX does not require, but permits, tmpfile() to restrict file
@@ -1052,7 +1052,7 @@ static TAR_MEMBER *
 _tarPositionTo(ArchiveHandle *AH, const char *filename)
 {
     lclContext *ctx = (lclContext *) AH->formatData;
-    TAR_MEMBER *th = pg_malloc0(sizeof(TAR_MEMBER));
+    TAR_MEMBER *th = pg_malloc0_object(TAR_MEMBER);
     char        c;
     char        header[TAR_BLOCK_SIZE];
     size_t      i,


@@ -247,8 +247,8 @@ main(int argc, char *argv[])
 do
 {
#define PARAMS_ARRAY_SIZE	8
-    const char **keywords = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*keywords));
-    const char **values = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*values));
+    const char **keywords = pg_malloc_array(const char *, PARAMS_ARRAY_SIZE);
+    const char **values = pg_malloc_array(const char *, PARAMS_ARRAY_SIZE);
     keywords[0] = "host";
     values[0] = options.host;
@@ -750,7 +750,7 @@ simple_action_list_append(SimpleActionList *list,
 {
     SimpleActionListCell *cell;
-    cell = (SimpleActionListCell *) pg_malloc(sizeof(SimpleActionListCell));
+    cell = pg_malloc_object(SimpleActionListCell);
     cell->next = NULL;
     cell->action = action;


@@ -39,7 +39,7 @@ get_configdata(const char *my_exec_path, size_t *configdata_len)
 /* Adjust this to match the number of items filled below */
 *configdata_len = 23;
-configdata = (ConfigData *) palloc(*configdata_len * sizeof(ConfigData));
+configdata = palloc_array(ConfigData, *configdata_len);
 configdata[i].name = pstrdup("BINDIR");
 strlcpy(path, my_exec_path, sizeof(path));


@@ -59,7 +59,7 @@ get_controlfile(const char *DataDir, bool *crc_ok_p)
 AssertArg(crc_ok_p);
-ControlFile = palloc(sizeof(ControlFileData));
+ControlFile = palloc_object(ControlFileData);
 snprintf(ControlFilePath, MAXPGPATH, "%s/global/pg_control", DataDir);
 #ifndef FRONTEND