Mirror of https://github.com/postgres/postgres.git (synced 2025-07-09 22:41:56 +03:00)
Pre branch pgindent / pgperltidy run
Along the way make a slight adjustment to src/include/utils/queryjumble.h to avoid an unused typedef.
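The queryjumble.h hunk itself is not included in the excerpt below, so the following is only a generic C illustration of what an "unused typedef" cleanup looks like; the identifiers, the file layout, and the warning flag mentioned are assumptions for illustration and are not the actual queryjumble.h adjustment.

/*
 * Hypothetical sketch only; the names below are invented and are not the
 * actual queryjumble.h change.  Compiling with "gcc -Wall -c example.c"
 * reports the unused typedef in the first function.
 */
#include <stddef.h>

size_t
with_unused_typedef(void)
{
    typedef unsigned char payload_byte; /* declared but never referenced:
                                         * -Wunused-local-typedefs fires here */

    return 64;
}

size_t
without_unused_typedef(void)
{
    /* The typedef is simply dropped, so nothing is left unused. */
    return 64;
}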
@@ -410,8 +410,8 @@ RelationGetBufferForTuple(Relation relation, Size len,
     }
 
     /*
-     * If the FSM knows nothing of the rel, try the last page before we
-     * give up and extend. This avoids one-tuple-per-page syndrome during
+     * If the FSM knows nothing of the rel, try the last page before we give
+     * up and extend. This avoids one-tuple-per-page syndrome during
      * bootstrapping or in a recently-started system.
      */
     if (targetBlock == InvalidBlockNumber)
@@ -890,11 +890,11 @@ sub morph_row_for_pgattr
     # Copy the type data from pg_type, and add some type-dependent items
     my $type = $types{$atttype};
 
-    $row->{atttypid}   = $type->{oid};
-    $row->{attlen}     = $type->{typlen};
-    $row->{attbyval}   = $type->{typbyval};
-    $row->{attalign}   = $type->{typalign};
-    $row->{attstorage} = $type->{typstorage};
+    $row->{atttypid}       = $type->{oid};
+    $row->{attlen}         = $type->{typlen};
+    $row->{attbyval}       = $type->{typbyval};
+    $row->{attalign}       = $type->{typalign};
+    $row->{attstorage}     = $type->{typstorage};
     $row->{attcompression} = '\0';
 
     # set attndims if it's an array type
@@ -2294,7 +2294,7 @@ StoreAttrDefault(Relation rel, AttrNumber attnum,
     valuesAtt[Anum_pg_attribute_atthasdef - 1] = true;
     replacesAtt[Anum_pg_attribute_atthasdef - 1] = true;
 
-    if (rel->rd_rel->relkind == RELKIND_RELATION && add_column_mode &&
+    if (rel->rd_rel->relkind == RELKIND_RELATION && add_column_mode &&
         !attgenerated)
     {
         expr2 = expression_planner(expr2);
@@ -704,16 +704,16 @@ ExecInsert(ModifyTableState *mtstate,
             }
 
             /*
-             * Initialize the batch slots. We don't know how many slots will be
-             * needed, so we initialize them as the batch grows, and we keep
-             * them across batches. To mitigate an inefficiency in how resource
-             * owner handles objects with many references (as with many slots
-             * all referencing the same tuple descriptor) we copy the tuple
-             * descriptor for each slot.
+             * Initialize the batch slots. We don't know how many slots will
+             * be needed, so we initialize them as the batch grows, and we
+             * keep them across batches. To mitigate an inefficiency in how
+             * resource owner handles objects with many references (as with
+             * many slots all referencing the same tuple descriptor) we copy
+             * the tuple descriptor for each slot.
              */
             if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
             {
-                TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
+                TupleDesc   tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
 
                 resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
                     MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
@@ -3173,7 +3173,7 @@ ExecEndModifyTable(ModifyTableState *node)
      */
     for (i = 0; i < node->mt_nrels; i++)
     {
-        int j;
+        int         j;
         ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
 
         if (!resultRelInfo->ri_usesFdwDirectModify &&
@@ -3183,8 +3183,9 @@ ExecEndModifyTable(ModifyTableState *node)
                                                            resultRelInfo);
 
         /*
-         * Cleanup the initialized batch slots. This only matters for FDWs with
-         * batching, but the other cases will have ri_NumSlotsInitialized == 0.
+         * Cleanup the initialized batch slots. This only matters for FDWs
+         * with batching, but the other cases will have ri_NumSlotsInitialized
+         * == 0.
          */
         for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
         {
@@ -2215,8 +2215,8 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn,
             change_done:
 
                     /*
-                     * If speculative insertion was confirmed, the record isn't
-                     * needed anymore.
+                     * If speculative insertion was confirmed, the record
+                     * isn't needed anymore.
                      */
                     if (specinsert != NULL)
                     {
@@ -759,7 +759,7 @@ fetch_remote_table_info(char *nspname, char *relname,
                      " ORDER BY a.attnum",
                      lrel->remoteid,
                      (walrcv_server_version(LogRepWorkerWalRcvConn) >= 120000 ?
-                     "AND a.attgenerated = ''" : ""),
+                      "AND a.attgenerated = ''" : ""),
                      lrel->remoteid);
     res = walrcv_exec(LogRepWorkerWalRcvConn, cmd.data,
                       lengthof(attrRow), attrRow);
|
@ -1031,7 +1031,8 @@ get_rel_sync_entry(PGOutputData *data, Oid relid)
|
||||
entry->pubactions.pubinsert = entry->pubactions.pubupdate =
|
||||
entry->pubactions.pubdelete = entry->pubactions.pubtruncate = false;
|
||||
entry->publish_as_relid = InvalidOid;
|
||||
entry->map = NULL; /* will be set by maybe_send_schema() if needed */
|
||||
entry->map = NULL; /* will be set by maybe_send_schema() if
|
||||
* needed */
|
||||
}
|
||||
|
||||
/* Validate the entry */
|
||||
|
@@ -1974,7 +1974,7 @@ GetOldestNonRemovableTransactionId(Relation rel)
     if (rel == NULL || rel->rd_rel->relisshared || RecoveryInProgress())
         return horizons.shared_oldest_nonremovable;
     else if (IsCatalogRelation(rel) ||
-            RelationIsAccessibleInLogicalDecoding(rel))
+             RelationIsAccessibleInLogicalDecoding(rel))
         return horizons.catalog_oldest_nonremovable;
     else if (RELATION_IS_LOCAL(rel))
         return horizons.temp_oldest_nonremovable;