mirror of https://github.com/postgres/postgres.git
Post-PG 10 beta1 pgindent run
perltidy run not included.
@@ -199,13 +199,13 @@ cleanup_directories_atexit(void)
 
         if (made_new_xlogdir || found_existing_xlogdir)
             fprintf(stderr,
-              _("%s: WAL directory \"%s\" not removed at user's request\n"),
+                    _("%s: WAL directory \"%s\" not removed at user's request\n"),
                     progname, xlog_dir);
     }
 
     if (made_tablespace_dirs || found_tablespace_dirs)
         fprintf(stderr,
-          _("%s: changes to tablespace directories will not be undone\n"),
+                _("%s: changes to tablespace directories will not be undone\n"),
                 progname);
 }
 
@@ -334,7 +334,7 @@ usage(void)
     printf(_(" -r, --max-rate=RATE maximum transfer rate to transfer data directory\n"
              " (in kB/s, or use suffix \"k\" or \"M\")\n"));
     printf(_(" -R, --write-recovery-conf\n"
-         " write recovery.conf for replication\n"));
+             " write recovery.conf for replication\n"));
     printf(_(" -S, --slot=SLOTNAME replication slot to use\n"));
     printf(_(" --no-slot prevent creation of temporary replication slot\n"));
     printf(_(" -T, --tablespace-mapping=OLDDIR=NEWDIR\n"
@@ -578,7 +578,7 @@ StartLogStreamer(char *startpos, uint32 timeline, char *sysidentifier)
     snprintf(param->xlog, sizeof(param->xlog), "%s/%s",
              basedir,
              PQserverVersion(conn) < MINIMUM_VERSION_FOR_PG_WAL ?
-        "pg_xlog" : "pg_wal");
+             "pg_xlog" : "pg_wal");
 
     /* Temporary replication slots are only supported in 10 and newer */
     if (PQserverVersion(conn) < MINIMUM_VERSION_FOR_TEMP_SLOTS)
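Note: the ternary above is the pattern pg_basebackup uses to pick the WAL directory name from the server version reported by libpq. Below is a minimal stand-alone sketch of the same check, not the pg_basebackup source; the empty conninfo and the MIN_VERSION_FOR_PG_WAL constant are assumptions here, the latter intended to mirror pg_basebackup's MINIMUM_VERSION_FOR_PG_WAL (PostgreSQL 10, i.e. 100000).

/* Sketch only: report which WAL directory name a given server would use. */
#include <stdio.h>
#include <libpq-fe.h>

#define MIN_VERSION_FOR_PG_WAL 100000   /* assumed to match pg_basebackup */

int
main(void)
{
    /* connection parameters come from the environment (PGHOST, PGPORT, ...) */
    PGconn *conn = PQconnectdb("");

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    /* Servers older than 10 still call the WAL directory pg_xlog. */
    printf("WAL directory name: %s\n",
           PQserverVersion(conn) < MIN_VERSION_FOR_PG_WAL ?
           "pg_xlog" : "pg_wal");

    PQfinish(conn);
    return 0;
}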
@@ -590,9 +590,9 @@ StartLogStreamer(char *startpos, uint32 timeline, char *sysidentifier)
     {
         /*
          * Create pg_wal/archive_status or pg_xlog/archive_status (and thus
-         * pg_wal or pg_xlog) depending on the target server so we can write to
-         * basedir/pg_wal or basedir/pg_xlog as the directory entry in the tar
-         * file may arrive later.
+         * pg_wal or pg_xlog) depending on the target server so we can write
+         * to basedir/pg_wal or basedir/pg_xlog as the directory entry in the
+         * tar file may arrive later.
          */
         snprintf(statusdir, sizeof(statusdir), "%s/%s/archive_status",
                  basedir,
@@ -1403,16 +1403,16 @@ ReceiveAndUnpackTarFile(PGconn *conn, PGresult *res, int rownum)
             {
                 /*
                  * When streaming WAL, pg_wal (or pg_xlog for pre-9.6
-                 * clusters) will have been created by the wal receiver
-                 * process. Also, when the WAL directory location
-                 * was specified, pg_wal (or pg_xlog) has already been
-                 * created as a symbolic link before starting the actual
-                 * backup. So just ignore creation failures on related
-                 * directories.
+                 * clusters) will have been created by the wal
+                 * receiver process. Also, when the WAL directory
+                 * location was specified, pg_wal (or pg_xlog) has
+                 * already been created as a symbolic link before
+                 * starting the actual backup. So just ignore creation
+                 * failures on related directories.
                  */
                 if (!((pg_str_endswith(filename, "/pg_wal") ||
-                       pg_str_endswith(filename, "/pg_xlog")||
-                       pg_str_endswith(filename, "/archive_status")) &&
+                       pg_str_endswith(filename, "/pg_xlog") ||
+                       pg_str_endswith(filename, "/archive_status")) &&
                       errno == EEXIST))
                 {
                     fprintf(stderr,
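Note: the comment and condition above amount to "tolerate EEXIST when creating the WAL-related directories", because the WAL receiver or an earlier symlink may already have created them. Below is a self-contained sketch of that pattern, not the pg_basebackup code; str_endswith is a hypothetical stand-in for the frontend helper pg_str_endswith, and the path in main is just an example.

/* Sketch only: create a directory, but accept "already exists" for the
 * WAL-related paths named in the hunk above. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

/* hypothetical stand-in for pg_str_endswith() */
static bool
str_endswith(const char *str, const char *suffix)
{
    size_t slen = strlen(str);
    size_t xlen = strlen(suffix);

    return slen >= xlen && strcmp(str + slen - xlen, suffix) == 0;
}

static bool
create_dir_tolerant(const char *path)
{
    if (mkdir(path, 0700) == 0)
        return true;

    /* Ignore the failure only when the directory already exists and it is
     * one of the WAL-related directories. */
    if (errno == EEXIST &&
        (str_endswith(path, "/pg_wal") ||
         str_endswith(path, "/pg_xlog") ||
         str_endswith(path, "/archive_status")))
        return true;

    fprintf(stderr, "could not create directory \"%s\": %s\n",
            path, strerror(errno));
    return false;
}

int
main(void)
{
    return create_dir_tolerant("./pg_wal") ? 0 : 1;
}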
@@ -1758,7 +1758,7 @@ BaseBackup(void)
 
     if (verbose)
         fprintf(stderr,
-           _("%s: initiating base backup, waiting for checkpoint to complete\n"),
+                _("%s: initiating base backup, waiting for checkpoint to complete\n"),
                 progname);
 
     if (showprogress && !verbose)
@@ -2041,11 +2041,11 @@ BaseBackup(void)
     PQfinish(conn);
 
     /*
-     * Make data persistent on disk once backup is completed. For tar
-     * format once syncing the parent directory is fine, each tar file
-     * created per tablespace has been already synced. In plain format,
-     * all the data of the base directory is synced, taking into account
-     * all the tablespaces. Errors are not considered fatal.
+     * Make data persistent on disk once backup is completed. For tar format
+     * once syncing the parent directory is fine, each tar file created per
+     * tablespace has been already synced. In plain format, all the data of
+     * the base directory is synced, taking into account all the tablespaces.
+     * Errors are not considered fatal.
      */
     if (do_sync)
     {
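Note: the reflowed comment describes the durability step: in tar format, syncing the output directory is enough because each tablespace's tar file has already been synced, and failures are reported but not fatal. Below is a rough POSIX sketch of syncing a directory that way; it is not pg_basebackup's own sync helpers, just the basic idea, and fsync_dir_best_effort is a name invented for this example.

/* Sketch only: fsync a directory so that entries created in it (for
 * example freshly written tar files) survive a crash.  Errors are
 * reported but treated as non-fatal, as the comment above describes. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void
fsync_dir_best_effort(const char *path)
{
    int fd = open(path, O_RDONLY);

    if (fd < 0)
    {
        fprintf(stderr, "could not open \"%s\": %s\n", path, strerror(errno));
        return;
    }
    if (fsync(fd) != 0)
        fprintf(stderr, "could not fsync \"%s\": %s\n", path, strerror(errno));
    close(fd);
}

int
main(void)
{
    fsync_dir_best_effort(".");
    return 0;
}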
@@ -2171,7 +2171,7 @@ main(int argc, char **argv)
             includewal = NO_WAL;
         }
         else if (strcmp(optarg, "f") == 0 ||
-            strcmp(optarg, "fetch") == 0)
+                 strcmp(optarg, "fetch") == 0)
         {
             includewal = FETCH_WAL;
         }
@@ -2312,7 +2312,7 @@ main(int argc, char **argv)
     if (format == 't' && includewal == STREAM_WAL && strcmp(basedir, "-") == 0)
     {
         fprintf(stderr,
-            _("%s: cannot stream write-ahead logs in tar mode to stdout\n"),
+                _("%s: cannot stream write-ahead logs in tar mode to stdout\n"),
                 progname);
         fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
                 progname);
@@ -2401,12 +2401,12 @@ main(int argc, char **argv)
         verify_dir_is_empty_or_create(xlog_dir, &made_new_xlogdir, &found_existing_xlogdir);
 
         /*
-         * Form name of the place where the symlink must go. pg_xlog has
-         * been renamed to pg_wal in post-10 clusters.
+         * Form name of the place where the symlink must go. pg_xlog has been
+         * renamed to pg_wal in post-10 clusters.
          */
         linkloc = psprintf("%s/%s", basedir,
-                    PQserverVersion(conn) < MINIMUM_VERSION_FOR_PG_WAL ?
-                    "pg_xlog" : "pg_wal");
+                           PQserverVersion(conn) < MINIMUM_VERSION_FOR_PG_WAL ?
+                           "pg_xlog" : "pg_wal");
 
 #ifdef HAVE_SYMLINK
         if (symlink(xlog_dir, linkloc) != 0)