diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 66f7a523c30..8a33331d035 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -387,7 +387,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
 	 * on, as cached in the BulkInsertState or relcache entry. If that
 	 * doesn't work, we ask the Free Space Map to locate a suitable page.
 	 * Since the FSM's info might be out of date, we have to be prepared to
-	 * loop around and retry multiple times. (To insure this isn't an infinite
+	 * loop around and retry multiple times. (To ensure this isn't an infinite
 	 * loop, we must update the FSM with the correct amount of free space on
 	 * each page that proves not to be suitable.) If the FSM has no record of
 	 * a page with enough free space, we give up and extend the relation.
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index eec3891dc28..d64cff96ab2 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -1226,7 +1226,7 @@ static struct config_bool ConfigureNamesBool[] =
 		{"fsync", PGC_SIGHUP, WAL_SETTINGS,
 			gettext_noop("Forces synchronization of updates to disk."),
 			gettext_noop("The server will use the fsync() system call in several places to make "
-						 "sure that updates are physically written to disk. This insures "
+						 "sure that updates are physically written to disk. This ensures "
						 "that a database cluster will recover to a consistent state after "
						 "an operating system or hardware crash.")
		},
diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h
index 33fcaf5c9a8..905b6d1ab50 100644
--- a/src/include/storage/buf_internals.h
+++ b/src/include/storage/buf_internals.h
@@ -150,7 +150,7 @@ typedef struct buftag
 * is held. Thus buffer header lock holder can do complex updates of the
 * state variable in single write, simultaneously with lock release (cleaning
 * BM_LOCKED flag). On the other hand, updating of state without holding
- * buffer header lock is restricted to CAS, which insure that BM_LOCKED flag
+ * buffer header lock is restricted to CAS, which ensures that BM_LOCKED flag
 * is not set. Atomic increment/decrement, OR/AND etc. are not allowed.
 *
 * An exception is that if we have the buffer pinned, its tag can't change
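
The buf_internals.h comment touched above describes the locking rule for the buffer state word: a holder of the buffer header lock can publish a complex update and release the lock in a single write, while anyone not holding the lock may only change the state via compare-and-swap, and only while BM_LOCKED is clear. A minimal sketch of that pattern follows. It assumes C11 atomics rather than PostgreSQL's pg_atomic wrappers, and MY_LOCKED, REFCOUNT_ONE, and pin_buffer are hypothetical stand-ins, not names from the tree.

/*
 * Illustrative sketch only: C11 atomics, not PostgreSQL code.
 * MY_LOCKED stands in for BM_LOCKED, REFCOUNT_ONE for the refcount
 * increment. Without holding the header lock, state may only change
 * via CAS, and the CAS is attempted only while the lock flag is clear,
 * so a lock holder's multi-field update cannot be clobbered.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MY_LOCKED		(UINT32_C(1) << 31)	/* stand-in for BM_LOCKED */
#define REFCOUNT_ONE	UINT32_C(1)			/* stand-in refcount unit */

static _Atomic uint32_t buf_state = 0;

/* Pin the buffer (bump its refcount) without taking the header lock. */
static void
pin_buffer(void)
{
	uint32_t	old_state = atomic_load(&buf_state);

	for (;;)
	{
		/* Header lock held: re-read and retry (a real implementation
		 * would back off instead of spinning tightly). */
		if (old_state & MY_LOCKED)
		{
			old_state = atomic_load(&buf_state);
			continue;
		}

		/*
		 * CAS succeeds only if the state is unchanged since we read it;
		 * on failure old_state is refreshed and we loop.
		 */
		if (atomic_compare_exchange_weak(&buf_state, &old_state,
										 old_state + REFCOUNT_ONE))
			break;
	}
}

int
main(void)
{
	pin_buffer();
	printf("state = %u\n", (unsigned) atomic_load(&buf_state));
	return 0;
}

The design point, as the comment states, is that unlocked updaters are restricted to CAS; plain atomic increment/decrement or OR/AND would be able to slip in while BM_LOCKED is set and corrupt an in-progress locked update.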