Mirror of https://github.com/postgres/postgres.git
Initial pgindent and pgperltidy run for v14.
Also "make reformat-dat-files". The only change worthy of note is that pgindent messed up the formatting of launcher.c's struct LogicalRepWorkerId, which led me to notice that that struct wasn't used at all anymore, so I just took it out.
Changed hunks (whitespace, indentation, and comment re-wrap changes from the pgindent run):

@@ -645,11 +645,11 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
    ("The opclass may or may not support processing of multiple scan keys ..."
    comment re-wrapped to the surrounding indentation)
@@ -667,10 +667,10 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
    ("When there are multiple scan keys, failure to meet the criteria ..."
    comment re-wrapped)
@@ -258,7 +258,7 @@ typedef struct BloomFilter
    (whitespace-only change to the "} BloomFilter;" closing line)
@@ -341,7 +341,7 @@ bloom_init(int ndistinct, double false_positive_rate)
    (bloom_add_value(BloomFilter * filter, ...) becomes
    bloom_add_value(BloomFilter *filter, ...))
@@ -378,7 +378,7 @@ bloom_add_value(BloomFilter * filter, uint32 value, bool *updated)
    (bloom_contains_value(BloomFilter * filter, ...) becomes
    bloom_contains_value(BloomFilter *filter, ...))
@@ -414,7 +414,7 @@ typedef struct BloomOpaque
    (whitespace-only change to the "} BloomOpaque;" closing line)
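The bloom_add_value() and bloom_contains_value() hunks above show pgindent's typedef-driven pointer spacing: a name on the typedefs list the run was given is treated as a type name, so "BloomFilter * filter" is rewritten as "BloomFilter *filter". A hedged C sketch of the effect; the struct body and the helper function here are stand-ins, not the real brin_bloom.c definitions.

#include <stdint.h>

typedef struct BloomFilter
{
	uint32_t	nbits;			/* stand-in field for illustration */
	uint8_t		data[1];		/* stands in for FLEXIBLE_ARRAY_MEMBER */
} BloomFilter;

/*
 * With "BloomFilter" known as a typedef, pgindent attaches the "*" to the
 * parameter name: "BloomFilter *filter", not "BloomFilter * filter".
 */
static int
bloom_is_empty(BloomFilter *filter)
{
	return filter->nbits == 0;
}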
@@ -114,7 +114,7 @@ typedef struct MinmaxMultiOpaque
    (whitespace-only change to the "} MinmaxMultiOpaque;" closing line)
@@ -261,7 +261,7 @@ typedef struct compare_context
    (whitespace-only change to the "} compare_context;" closing line)
@@ -670,11 +670,11 @@ range_serialize(Ranges *range)
    ("For values passed by value, we need to copy just the significant
    bytes ..." comment re-wrapped)
@@ -771,7 +771,7 @@ range_deserialize(int maxvalues, SerializedRanges *serialized)
    (whitespace-only change to the 'if (typlen > 0) /* fixed-length by-ref
    types */' line)
@@ -824,7 +824,8 @@ range_deserialize(int maxvalues, SerializedRanges *serialized)
    ('Size slen = strlen(ptr) + 1;' declaration re-aligned; hunk grows by one
    line)
@@ -2156,8 +2157,8 @@ brin_minmax_multi_distance_interval(PG_FUNCTION_ARGS)
    ("Delta is (fractional) number of days between the intervals ..." comment
    re-wrapped)
@@ -2315,13 +2316,12 @@ brin_minmax_multi_distance_inet(PG_FUNCTION_ARGS)
    ("The length is calculated from the mask length ..." comment, including
    its XXX note, re-wrapped)
@@ -2331,8 +2331,8 @@ brin_minmax_multi_distance_inet(PG_FUNCTION_ARGS)
    ('unsigned char mask;' and 'int nbits;' declarations re-aligned)
@@ -371,6 +371,7 @@ brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk)
    (spacing-only change around the "We're only removing data, not reading
    it ..." comment; hunk grows by one line)
@@ -177,15 +177,15 @@ brin_form_tuple(BrinDesc *brdesc, BlockNumber blkno, BrinMemTuple *tuple,
    ('Datum value = ...;', 'TypeCacheEntry *atttype = ...;' and
    'bool free_value = false;' declarations re-aligned)
@@ -201,9 +201,9 @@ brin_form_tuple(BrinDesc *brdesc, BlockNumber blkno, BrinMemTuple *tuple,
    ("XXX Is this actually true? Could it be that the summary is NULL ..."
    comment re-wrapped)
@@ -213,16 +213,16 @@ brin_form_tuple(BrinDesc *brdesc, BlockNumber blkno, BrinMemTuple *tuple,
    ("If value is above size target, and is of a compressible datatype ..."
    comment re-wrapped; 'Datum cvalue;' and 'char compression;' declarations
    re-aligned)
@@ -103,14 +103,14 @@ index_form_tuple(TupleDesc tupleDescriptor,
    ('Datum cvalue;' and 'char compression = att->attcompression;'
    declarations re-aligned; "If the compression method is not valid, use the
    default ..." comment re-wrapped)
@@ -24,7 +24,7 @@
    (whitespace-only change to the 'int default_toast_compression =
    TOAST_PGLZ_COMPRESSION;' GUC declaration)
@@ -109,7 +109,7 @@ pglz_decompress_datum(const struct varlena *value)
    (continuation argument 'int32 slicelength)' of
    pglz_decompress_datum_slice() re-indented)
@@ -255,12 +255,12 @@ lz4_decompress_datum_slice(const struct varlena *value, int32 slicelength)
    ('ToastCompressionId cmid = TOAST_INVALID_COMPRESSION_ID;' declaration
    re-aligned in toast_get_compression_id(); "If it is stored externally
    then fetch the compression method id ..." comment re-wrapped)
@@ -48,7 +48,7 @@ toast_compress_datum(Datum value, char cmethod)
    ('ToastCompressionId cmid = TOAST_INVALID_COMPRESSION_ID;' declaration
    re-aligned)
@@ -236,7 +236,7 @@ execute_attr_map_slot(AttrMap *attrMap,
    ('Bitmapset *out_cols;' declaration re-aligned in execute_attr_map_cols())
@@ -35,9 +35,9 @@ static bool rtree_internal_consistent(BOX *key, BOX *query,
    (whitespace-only changes to the static declarations of
    gist_bbox_zorder_cmp() and gist_bbox_zorder_cmp_abbrev())
@@ -267,7 +267,7 @@ gistvalidate(Oid opclassoid)
    (continuation line 'i == GIST_OPTIONS_PROC || i == GIST_SORTSUPPORT_PROC)'
    re-indented)
@@ -432,11 +432,11 @@ heapgetpage(TableScanDesc sscan, BlockNumber page)
    (long comment on all-visible pages, hot standby and index-only scans
    re-wrapped)
@@ -2095,11 +2095,11 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
    ("If we're inserting frozen entry into an empty page ..." comment
    re-wrapped)
@@ -2109,7 +2109,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
    (continuation arguments of visibilitymap_get_status() re-indented)
@@ -2139,8 +2139,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
    ("If the page is all visible, need to clear that ..." comment re-wrapped)
@@ -2258,11 +2258,11 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
    ("If we've frozen everything on the page, update the visibilitymap ..."
    comment re-wrapped)
@@ -2270,14 +2270,14 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
    ("It's fine to use InvalidTransactionId here ..." comment re-wrapped;
    continuation arguments of visibilitymap_set() re-indented)
@@ -2547,7 +2547,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
    ('Assert (...)' becomes 'Assert(...)')
@@ -3063,7 +3063,10 @@ l1:
    (single-line comment "For logical decode we need combo CIDs to properly
    decode the catalog" reflowed into a multi-line comment block)
@@ -7932,16 +7935,16 @@ bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
    (comment on npromisingtids/nhtids bucketing re-wrapped)
@@ -8944,8 +8947,8 @@ heap_xlog_insert(XLogReaderState *record)
    ('Assert (...)' becomes 'Assert(...)'; continuation line re-indented)
@@ -9072,8 +9075,8 @@ heap_xlog_multi_insert(XLogReaderState *record)
    ('Assert (...)' becomes 'Assert(...)'; continuation line re-indented)
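The Assert() hunks above (heap_multi_insert, heap_xlog_insert, heap_xlog_multi_insert) show two more mechanical rules: no space between a function-like name and its opening parenthesis, and continuation lines of a multi-line condition aligned under that parenthesis. A small self-contained sketch follows; the flag values are assumptions for illustration, only the names come from the diff.

#include <assert.h>

#define Assert(condition)	assert(condition)

/* Illustrative values; only the macro names appear in the diff above. */
#define XLH_INSERT_ALL_VISIBLE_CLEARED	(1 << 0)
#define XLH_INSERT_ALL_FROZEN_SET		(1 << 1)

static void
check_insert_flags(int flags)
{
	/* pgindent writes "Assert(" and lines the continuation up under "(" */
	Assert(!((flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
			 (flags & XLH_INSERT_ALL_FROZEN_SET)));
}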
@@ -1659,13 +1659,13 @@ heapam_index_build_range_scan(Relation heapRelation,
    ("If a HOT tuple points to a root that we don't know about ..." comment
    re-wrapped; 'Page page = BufferGetPage(hscan->rs_cbuf);' declaration
    re-aligned)
@@ -2482,8 +2482,8 @@ reform_and_rewrite_tuple(HeapTuple tuple,
    ('ToastCompressionId cmid;' and 'char cmethod;' declarations re-aligned)
@@ -1608,8 +1608,8 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
    ("another transaction might have (tried to) delete this tuple or
    cmin/cmax was stored in a combo CID ..." comment re-wrapped)
@@ -1629,8 +1629,8 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
    ("XXX For the streaming case, we can track the largest combo CID
    assigned ..." comment re-wrapped)
@@ -1717,8 +1717,8 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
    (the same XXX comment re-wrapped at its second occurrence)
@@ -410,8 +410,8 @@ RelationGetBufferForTuple(Relation relation, Size len,
    ("If the FSM knows nothing of the rel, try the last page before we give
    up and extend ..." comment re-wrapped)
@@ -95,8 +95,8 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
    ("We can't write WAL in recovery mode ..." comment re-wrapped)
@@ -691,8 +691,8 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
    ("Deliberately avoid telling the stats collector about LP_DEAD items ..."
    comment re-wrapped)
@@ -2284,7 +2284,7 @@ static void
    ('BlockNumber vacuumed_pages;' declaration re-aligned in
    lazy_vacuum_heap_rel())
@@ -612,8 +612,8 @@ systable_endscan(SysScanDesc sysscan)
    ("Reset the bsysscan flag at the end of the systable scan ..." comment
    re-wrapped)
@@ -1054,22 +1054,22 @@ _bt_lockbuf(Relation rel, Buffer buf, int access)
    (long comment on Valgrind buffer access tracking re-wrapped)
@@ -2395,7 +2395,7 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, BlockNumber scanblkno,
    ('bool leftsibvalid = true;' declaration re-aligned)
@@ -898,8 +898,8 @@ btree_xlog_unlink_page(uint8 info, XLogReaderState *record)
    ('Buffer leafbuf;' and 'IndexTupleData trunctuple;' declarations
    re-aligned)
@@ -2278,7 +2278,7 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid,
    (ereport() continuation 'multiWrapLimit, oldest_datoid)));' re-indented)
@@ -2612,7 +2612,7 @@ SetOffsetVacuumLimit(bool is_startup)
    (ereport() continuation 'oldestOffset)));' re-indented)
@@ -2641,7 +2641,7 @@ SetOffsetVacuumLimit(bool is_startup)
    (ereport() continuation 'offsetStopLimit, oldestMultiXactId)));'
    re-indented)
@@ -3283,9 +3283,9 @@ multixact_redo(XLogReaderState *record)
    ("Make sure nextXid is beyond any XID mentioned in the record ..."
    comment re-wrapped)
@@ -1134,9 +1134,9 @@ EndPrepare(GlobalTransaction gxact)
    ("Mark the prepared transaction as valid ..." comment re-wrapped)
@@ -179,10 +179,10 @@ GetNewTransactionId(bool isSubXact)
    ("Now advance the nextXid counter ..." comment re-wrapped)
@@ -192,8 +192,8 @@ GetNewTransactionId(bool isSubXact)
    ("Note that readers of ProcGlobal->xids/PGPROC->xid should be
    careful ..." comment re-wrapped)
@@ -281,9 +281,9 @@ AdvanceNextFullTransactionIdPastXid(TransactionId xid)
    ("It is safe to read nextXid without a lock ..." comment re-wrapped)
@@ -426,7 +426,7 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid)
    (ereport() continuation 'xidWrapLimit, oldest_datoid)));' re-indented)
@@ -617,8 +617,8 @@ AssertTransactionIdInAllowableRange(TransactionId xid)
    ("We can't acquire XidGenLock ..." comment re-wrapped)
@@ -723,7 +723,7 @@ typedef struct XLogCtlData
    ('RecoveryPauseState recoveryPauseState;' member declaration re-aligned)
@@ -2858,8 +2858,8 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
    (errmsg_internal() continuation arguments re-indented)
@@ -3357,7 +3357,7 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock)
    ('int iovcnt = Min(blocks - i, lengthof(iov));' declaration re-aligned)
@@ -3814,8 +3814,8 @@ XLogFileReadAnyTLI(XLogSegNo segno, int emode, XLogSource source)
    (comment on reading the timeline history file streamed from the primary
    re-wrapped)
@@ -4229,7 +4229,7 @@ RemoveXlogFile(const char *segname, XLogSegNo recycleSegNo,
    (ereport() continuation 'segname)));' re-indented)
@@ -4241,7 +4241,7 @@ RemoveXlogFile(const char *segname, XLogSegNo recycleSegNo,
    (ereport() continuation 'segname)));' re-indented)
@@ -6093,7 +6093,7 @@ recoveryPausesHere(bool endOfRecovery)
    ('RecoveryPauseState state;' declaration re-aligned in
    GetRecoveryPauseState())
@@ -6347,7 +6347,11 @@ RecoveryRequiresIntParameter(const char *param_name, int currValue, int minValue
    (single-line comment "Repeat the detail from above so it's easy to find
    in the log." reflowed into a multi-line comment block)
@@ -6357,15 +6361,15 @@ RecoveryRequiresIntParameter(const char *param_name, int currValue, int minValue
    ("If recovery pause is requested then set it paused ..." and "We wait on
    a condition variable ..." comments re-wrapped)
@@ -6377,7 +6381,7 @@ RecoveryRequiresIntParameter(const char *param_name, int currValue, int minValue
    (whitespace-only change to the comment "Repeat the detail from above so
    it's easy to find in the log.")
@@ -6920,9 +6924,8 @@ StartupXLOG(void)
    ("Startup CLOG. This must be done after ShmemVariableCache->nextXid ..."
    comment re-wrapped, one line shorter)
@@ -6969,11 +6972,11 @@ StartupXLOG(void)
    (comment on archiving timeline history files re-wrapped)
@@ -7196,9 +7199,9 @@ StartupXLOG(void)
    ("Startup subtrans only ..." comment re-wrapped)
@@ -7400,8 +7403,7 @@ StartupXLOG(void)
    (comment "ShmemVariableCache->nextXid must be beyond record's xid."
    joined onto one line)
@@ -8092,10 +8094,10 @@ StartupXLOG(void)
    ("If this was a promotion, request an (online) checkpoint now ..."
    comment re-wrapped)
@@ -8674,7 +8676,7 @@ LogCheckpointStart(int flags, bool restartpoint)
    (whitespace-only change to the translator comment above the
    "restartpoint starting" message)
@@ -8686,7 +8688,7 @@ LogCheckpointStart(int flags, bool restartpoint)
    (whitespace-only change to the translator comment above the
    "checkpoint starting" message)
@@ -11851,12 +11853,12 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
    (errmsg_internal() continuations 'backuptime, BACKUP_LABEL_FILE' and
    'backuplabel, BACKUP_LABEL_FILE' re-indented)
@@ -11873,7 +11875,7 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
    (errmsg_internal() continuation 'tli_from_file, BACKUP_LABEL_FILE)));'
    re-indented)
@@ -12177,8 +12179,8 @@ retry:
    ("If the current segment is being streamed from the primary ..." comment
    re-wrapped)
@@ -12239,12 +12241,13 @@ retry:
    (comment on the recycled-WAL-segment scenario re-wrapped, one line longer)
@@ -12399,15 +12402,15 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
    ("Failure while streaming ..." comment re-wrapped)
@@ -600,7 +600,7 @@ pg_is_wal_replay_paused(PG_FUNCTION_ARGS)
    ('char *statestr = NULL;' declaration re-aligned in
    pg_get_wal_replay_pause_state())
@@ -609,7 +609,7 @@ pg_get_wal_replay_pause_state(PG_FUNCTION_ARGS)
    ('switch(GetRecoveryPauseState())' becomes
    'switch (GetRecoveryPauseState())')
@@ -1065,8 +1065,8 @@ log_newpages(RelFileNode *rnode, ForkNumber forkNum, int num_pages,
    ("The page may be uninitialized. If so, we can't set the LSN ..." comment
    re-wrapped)