Re-add GUC track_wal_io_timing
This commit is a rework of 2421e9a51d, about which Andres Freund raised
some concerns: it is valuable to have both track_io_timing and
track_wal_io_timing in some cases, since the WAL write and fsync paths
can be a major bottleneck for some workloads. Hence, it can be relevant
to skip the WAL timings in environments where pg_test_timing performs
poorly, while still capturing some IO data under track_io_timing for
the non-WAL IO paths. The opposite can also be true: it should be
possible to disable the non-WAL timings and enable the WAL timings (the
previous GUC setup allowed this).
track_wal_io_timing is added back by this commit, controlling whether
WAL timings are calculated in pg_stat_io for the read, write and fsync
paths, as was previously done with pg_stat_wal. pg_stat_wal tracked
only the write and sync parts (now removed); the read stats are new
data tracked in pg_stat_io, and all three are aggregated when
track_wal_io_timing is enabled. The read part matters during recovery,
or when an XLogReader is used.
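
(A minimal sketch of the callee side, for context: the diff below
threads the relevant GUC into pgstat_prepare_io_time(), so WAL call
sites can pass track_wal_io_timing while buffer and relation IO keeps
passing track_io_timing. The function name and the instr_time macros
are real; the body is an assumption based on the instr_time API, not a
copy of the committed helper.)

#include "portability/instr_time.h"

/*
 * Capture a start timestamp for an IO operation, but only when the
 * caller's relevant track_* GUC is enabled; otherwise return a zeroed
 * instr_time so the accounting step can tell that timing was skipped.
 */
static inline instr_time
pgstat_prepare_io_time(bool track_io_guc)
{
    instr_time  io_start;

    if (track_io_guc)
        INSTR_TIME_SET_CURRENT(io_start);
    else
        INSTR_TIME_SET_ZERO(io_start);

    return io_start;
}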
Extra note: finer-grained control over which types of timings are
calculated in pg_stat_io could be achieved with a GUC that lists pairs
of (IOObject, IOOp).
Reported-by: Andres Freund <andres@anarazel.de>
Author: Bertrand Drouvot <bertranddrouvot.pg@gmail.com>
Co-authored-by: Michael Paquier <michael@paquier.xyz>
Discussion: https://postgr.es/m/3opf2wh2oljco6ldyqf7ukabw3jijnnhno6fjb4mlu6civ5h24@fcwmhsgmlmzu
@@ -1509,7 +1509,7 @@ WaitReadBuffers(ReadBuffersOperation *operation)
 				io_pages[io_buffers_len++] = BufferGetBlock(buffers[i]);
 			}
 
-			io_start = pgstat_prepare_io_time();
+			io_start = pgstat_prepare_io_time(track_io_timing);
 			smgrreadv(operation->smgr, forknum, io_first_block, io_pages, io_buffers_len);
 			pgstat_count_io_op_time(io_object, io_context, IOOP_READ, io_start,
 									1, io_buffers_len * BLCKSZ);
@@ -2402,7 +2402,7 @@ ExtendBufferedRelShared(BufferManagerRelation bmr,
 		}
 	}
 
-	io_start = pgstat_prepare_io_time();
+	io_start = pgstat_prepare_io_time(track_io_timing);
 
 	/*
 	 * Note: if smgrzeroextend fails, we will end up with buffers that are
@@ -3857,7 +3857,7 @@ FlushBuffer(BufferDesc *buf, SMgrRelation reln, IOObject io_object,
 	 */
 	bufToWrite = PageSetChecksumCopy((Page) bufBlock, buf->tag.blockNum);
 
-	io_start = pgstat_prepare_io_time();
+	io_start = pgstat_prepare_io_time(track_io_timing);
 
 	/*
 	 * bufToWrite is either the shared buffer or a copy, as appropriate.
@@ -4459,7 +4459,7 @@ FlushRelationBuffers(Relation rel)
 
 			PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
 
-			io_start = pgstat_prepare_io_time();
+			io_start = pgstat_prepare_io_time(track_io_timing);
 
 			smgrwrite(srel,
 					  BufTagGetForkNum(&bufHdr->tag),
@@ -5905,7 +5905,7 @@ IssuePendingWritebacks(WritebackContext *wb_context, IOContext io_context)
 		sort_pending_writebacks(wb_context->pending_writebacks,
 								wb_context->nr_pending);
 
-	io_start = pgstat_prepare_io_time();
+	io_start = pgstat_prepare_io_time(track_io_timing);
 
 	/*
 	 * Coalesce neighbouring writes, but nothing else. For that we iterate
@@ -244,7 +244,7 @@ GetLocalVictimBuffer(void)
 
 		PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
 
-		io_start = pgstat_prepare_io_time();
+		io_start = pgstat_prepare_io_time(track_io_timing);
 
 		/* And write... */
 		smgrwrite(oreln,
@@ -414,7 +414,7 @@ ExtendBufferedRelLocal(BufferManagerRelation bmr,
 		}
 	}
 
-	io_start = pgstat_prepare_io_time();
+	io_start = pgstat_prepare_io_time(track_io_timing);
 
 	/* actually extend relation */
 	smgrzeroextend(bmr.smgr, fork, first_block, extend_by, false);
@@ -1391,7 +1391,7 @@ register_dirty_segment(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
 		ereport(DEBUG1,
 				(errmsg_internal("could not forward fsync request because request queue is full")));
 
-		io_start = pgstat_prepare_io_time();
+		io_start = pgstat_prepare_io_time(track_io_timing);
 
 		if (FileSync(seg->mdfd_vfd, WAIT_EVENT_DATA_FILE_SYNC) < 0)
 			ereport(data_sync_elevel(ERROR),
@@ -1790,7 +1790,7 @@ mdsyncfiletag(const FileTag *ftag, char *path)
 		need_to_close = true;
 	}
 
-	io_start = pgstat_prepare_io_time();
+	io_start = pgstat_prepare_io_time(track_io_timing);
 
 	/* Sync the file. */
 	result = FileSync(file, WAIT_EVENT_DATA_FILE_SYNC);
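
(For completeness, a hedged sketch of what a WAL-side call site would
look like under this commit: the WAL paths are not part of this
excerpt, so the wrapper function and its nbytes parameter below are
hypothetical. pgstat_prepare_io_time(), pgstat_count_io_op_time(),
IOOP_WRITE and IOCONTEXT_NORMAL follow the names used in the hunks
above; IOOBJECT_WAL is an assumption based on the commit message's
description of WAL stats living in pg_stat_io.)

#include "postgres.h"
#include "pgstat.h"
#include "portability/instr_time.h"

/*
 * Hypothetical helper mirroring the call sites above: time a WAL write
 * only when track_wal_io_timing is enabled.  nbytes is the number of
 * bytes just written; the actual write of WAL data is elided.
 */
static void
count_timed_wal_write(uint64 nbytes)
{
    instr_time  io_start;

    io_start = pgstat_prepare_io_time(track_wal_io_timing);

    /* ... issue the WAL write here ... */

    pgstat_count_io_op_time(IOOBJECT_WAL, IOCONTEXT_NORMAL, IOOP_WRITE,
                            io_start, 1, nbytes);
}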