diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index 3a062a145ca..cc18b0bbf0a 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -3621,7 +3621,7 @@ include_dir 'conf.d'
         pool after that.  However, on file systems with a block size larger
         than
         <productname>PostgreSQL</productname>'s, prefetching can avoid a
-        costly read-before-write when a blocks are later written.
+        costly read-before-write when blocks are later written.
         The default is <literal>off</literal>.
        </para>
       </listitem>
diff --git a/doc/src/sgml/wal.sgml b/doc/src/sgml/wal.sgml
index 24cf567ee2e..36e00c92c26 100644
--- a/doc/src/sgml/wal.sgml
+++ b/doc/src/sgml/wal.sgml
@@ -816,9 +816,7 @@
    prefetching mechanism is most likely to be effective on systems
    with <varname>full_page_writes</varname> set to
    <literal>off</literal> (where that is safe), and where the working
-   set is larger than RAM.  By default, prefetching in recovery is enabled
-   on operating systems that have posix_fadvise
-   support.
+   set is larger than RAM.  By default, prefetching in recovery is disabled.
   
  
 
diff --git a/src/backend/access/transam/xlogprefetch.c b/src/backend/access/transam/xlogprefetch.c
index 28764326bcc..2178c9086e6 100644
--- a/src/backend/access/transam/xlogprefetch.c
+++ b/src/backend/access/transam/xlogprefetch.c
@@ -31,12 +31,14 @@
  * stall; this is counted with "skip_fpw".
  *
  * The only way we currently have to know that an I/O initiated with
- * PrefetchSharedBuffer() has that recovery will eventually call ReadBuffer(),
- * and perform a synchronous read.  Therefore, we track the number of
+ * PrefetchSharedBuffer() has completed is to wait for the corresponding call
+ * to XLogReadBufferForRedo() to return.  Therefore, we track the number of
  * potentially in-flight I/Os by using a circular buffer of LSNs.  When it's
- * full, we have to wait for recovery to replay records so that the queue
- * depth can be reduced, before we can do any more prefetching.  Ideally, this
- * keeps us the right distance ahead to respect maintenance_io_concurrency.
+ * full, we have to wait for recovery to replay enough records to remove some
+ * LSNs, and only then can we initiate more prefetching.  Ideally, this keeps
+ * us just the right distance ahead to respect maintenance_io_concurrency,
+ * though in practice it errs on the side of being too conservative because
+ * many I/Os complete sooner than we can know.
  *
  *-------------------------------------------------------------------------
  */
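
(Aside, for readers skimming the patch: below is a minimal sketch of the
flow-control idea the rewritten comment describes, not the prefetcher's
actual code.  The type, the function names, and the fixed queue depth
standing in for maintenance_io_concurrency are all hypothetical; only the
shape of the mechanism, a circular buffer of LSNs drained as replay
advances, is taken from the comment above.)

/* Hypothetical sketch, not PostgreSQL code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t XLogRecPtr;

/* Fixed stand-in for maintenance_io_concurrency. */
#define PREFETCH_QUEUE_DEPTH 10

typedef struct PrefetchQueue
{
	XLogRecPtr	lsns[PREFETCH_QUEUE_DEPTH];	/* circular buffer of LSNs */
	int			head;			/* next slot to fill */
	int			tail;			/* oldest remembered entry */
	int			inflight;		/* entries currently remembered */
} PrefetchQueue;

/* Can another prefetch be issued without exceeding the queue depth? */
static bool
prefetch_queue_has_room(const PrefetchQueue *q)
{
	return q->inflight < PREFETCH_QUEUE_DEPTH;
}

/* Remember that the block referenced by the record at 'lsn' was prefetched. */
static void
prefetch_queue_push(PrefetchQueue *q, XLogRecPtr lsn)
{
	q->lsns[q->head] = lsn;
	q->head = (q->head + 1) % PREFETCH_QUEUE_DEPTH;
	q->inflight++;
}

/*
 * Called as replay advances.  Any remembered prefetch whose record has now
 * been replayed must have completed by then (the replaying process read the
 * block synchronously if it still wasn't cached), so its slot can be reused.
 */
static void
prefetch_queue_replay_progress(PrefetchQueue *q, XLogRecPtr replayed_upto)
{
	while (q->inflight > 0 && q->lsns[q->tail] <= replayed_upto)
	{
		q->tail = (q->tail + 1) % PREFETCH_QUEUE_DEPTH;
		q->inflight--;
	}
}

int
main(void)
{
	PrefetchQueue q = {{0}, 0, 0, 0};

	/* Pretend blocks referenced by records at LSNs 100..104 were prefetched. */
	for (XLogRecPtr lsn = 100; lsn < 105 && prefetch_queue_has_room(&q); lsn++)
		prefetch_queue_push(&q, lsn);

	/* Replay reaches LSN 102, so the three oldest entries can be retired. */
	prefetch_queue_replay_progress(&q, 102);
	printf("still counted as in flight: %d\n", q.inflight);	/* prints 2 */

	return 0;
}

Because an entry is retired only once replay has passed its LSN, the queue
keeps counting reads that may in fact have completed long ago, which is the
conservatism the comment mentions.
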
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 46f1d6406f5..6dd889a7c0e 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -2774,7 +2774,7 @@ static struct config_int ConfigureNamesInt[] =
 	{
 		{"wal_decode_buffer_size", PGC_POSTMASTER, WAL_ARCHIVE_RECOVERY,
 			gettext_noop("Maximum buffer size for reading ahead in the WAL during recovery."),
-			gettext_noop("This controls the maximum distance we can read ahead n the WAL to prefetch referenced blocks."),
+			gettext_noop("This controls the maximum distance we can read ahead in the WAL to prefetch referenced blocks."),
 			GUC_UNIT_BYTE
 		},
 		&wal_decode_buffer_size,