Back-patch recent pg_upgrade fixes into 9.2.
This syncs contrib/pg_upgrade in the 9.2 branch with HEAD, except for the
HEAD changes related to converting XLogRecPtr to 64-bit int. It includes
back-patching these commits:

666d494d19dbd5dc7a177709a2f7069913f8ab89
	pg_upgrade: abstract out copying of files from old cluster to new
7afa8bed65ea925208f128048f3a528a64e1319a
	pg_upgrade: Run the created scripts in the test suite
ab577e63faf792593ca728625a8ef0b1dfaf7500
	Remove analyze_new_cluster.sh on make clean, too
34c02044ed7e7defde5a853b26dcd806c872d974
	Fix thinko in comment
088c065ce8e405fafbfa966937184ece9defcf20
	pg_upgrade: Fix exec_prog API to be less flaky
f763b77193b04eba03a1f4ce46df34dc0348419e
	Fix pg_upgrade to cope with non-default unix_socket_directory scenarios.
This commit is contained in:
parent b681a874d9
commit 5c7e91e9c3
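The change that drives most of the churn below is the reworked exec_prog() API. As a reading aid only (condensed from the call sites in this diff, not additional code from the commit): the old API took throw_error and is_priv first and made every caller add SYSTEMQUOTE and the output redirection by hand, while the new API takes the log file names first and appends the quoting and ">> logfile 2>&1" redirection itself.

/* Old convention: caller supplies SYSTEMQUOTE and redirects output itself. */
exec_prog(true, true, UTILITY_LOG_FILE, NULL,
		  SYSTEMQUOTE "\"%s/pg_resetxlog\" -o %u \"%s\" >> \"%s\" 2>&1" SYSTEMQUOTE,
		  new_cluster.bindir, old_cluster.controldata.chkpnt_nxtoid,
		  new_cluster.pgdata, UTILITY_LOG_FILE);

/* New convention: exec_prog() quotes the command and redirects to log_file. */
exec_prog(UTILITY_LOG_FILE, NULL, true,
		  "\"%s/pg_resetxlog\" -o %u \"%s\"",
		  new_cluster.bindir, old_cluster.controldata.chkpnt_nxtoid,
		  new_cluster.pgdata);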
@@ -11,7 +11,7 @@ OBJS = check.o controldata.o dump.o exec.o file.o function.o info.o \
 PG_CPPFLAGS = -DFRONTEND -DDLSUFFIX=\"$(DLSUFFIX)\" -I$(srcdir) -I$(libpq_srcdir)
 PG_LIBS = $(libpq_pgport)
 
-EXTRA_CLEAN = delete_old_cluster.sh log/ tmp_check/
+EXTRA_CLEAN = analyze_new_cluster.sh delete_old_cluster.sh log/ tmp_check/
 
 ifdef USE_PGXS
 PG_CONFIG = pg_config
@@ -183,13 +183,10 @@ issue_warnings(char *sequence_script_file_name)
 	if (sequence_script_file_name)
 	{
 		prep_status("Adjusting sequences");
-		exec_prog(true, true, UTILITY_LOG_FILE, NULL,
-				  SYSTEMQUOTE "\"%s/psql\" --echo-queries "
-				  "--set ON_ERROR_STOP=on "
-				  "--no-psqlrc --port %d --username \"%s\" "
-				  "-f \"%s\" --dbname template1 >> \"%s\" 2>&1" SYSTEMQUOTE,
-				  new_cluster.bindir, new_cluster.port, os_info.user,
-				  sequence_script_file_name, UTILITY_LOG_FILE);
+		exec_prog(UTILITY_LOG_FILE, NULL, true,
+				  "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
+				  new_cluster.bindir, cluster_conn_opts(&new_cluster),
+				  sequence_script_file_name);
 		unlink(sequence_script_file_name);
 		check_ok();
 	}
@@ -23,12 +23,11 @@ generate_old_dump(void)
 	 * --binary-upgrade records the width of dropped columns in pg_class, and
 	 * restores the frozenid's for databases and relations.
 	 */
-	exec_prog(true, true, UTILITY_LOG_FILE, NULL,
-			  SYSTEMQUOTE "\"%s/pg_dumpall\" --port %d --username \"%s\" "
-			  "--schema-only --binary-upgrade %s > \"%s\" 2>> \"%s\""
-			  SYSTEMQUOTE, new_cluster.bindir, old_cluster.port, os_info.user,
+	exec_prog(UTILITY_LOG_FILE, NULL, true,
+			  "\"%s/pg_dumpall\" %s --schema-only --binary-upgrade %s -f %s",
+			  new_cluster.bindir, cluster_conn_opts(&old_cluster),
 			  log_opts.verbose ? "--verbose" : "",
-			  ALL_DUMP_FILE, UTILITY_LOG_FILE);
+			  ALL_DUMP_FILE);
 	check_ok();
 }
 
@@ -26,77 +26,81 @@ static int win32_check_directory_write_permissions(void);
 
 /*
  * exec_prog()
  *	Execute an external program with stdout/stderr redirected, and report
  *	errors
  *
- * Formats a command from the given argument list and executes that
- * command. If the command executes, exec_prog() returns 1 otherwise
- * exec_prog() logs an error message and returns 0. Either way, the command
- * line to be executed is saved to the specified log file.
+ * Formats a command from the given argument list, logs it to the log file,
+ * and attempts to execute that command. If the command executes
+ * successfully, exec_prog() returns true.
 *
- * If throw_error is TRUE, this function will throw a PG_FATAL error
- * instead of returning should an error occur. The command it appended
- * to log_file; opt_log_file is used in error messages.
+ * If the command fails, an error message is saved to the specified log_file.
+ * If throw_error is true, this raises a PG_FATAL error and pg_upgrade
+ * terminates; otherwise it is just reported as PG_REPORT and exec_prog()
+ * returns false.
  */
-int
-exec_prog(bool throw_error, bool is_priv, const char *log_file,
-		  const char *opt_log_file, const char *fmt,...)
+bool
+exec_prog(const char *log_file, const char *opt_log_file,
+		  bool throw_error, const char *fmt,...)
 {
-	va_list		args;
 	int			result;
-	int			retval;
-	char		cmd[MAXPGPATH];
+	int			written;
+#define MAXCMDLEN (2 * MAXPGPATH)
+	char		cmd[MAXCMDLEN];
 	mode_t		old_umask = 0;
 	FILE	   *log;
+	va_list		ap;
 
-	if (is_priv)
-		old_umask = umask(S_IRWXG | S_IRWXO);
+	old_umask = umask(S_IRWXG | S_IRWXO);
 
-	va_start(args, fmt);
-	vsnprintf(cmd, MAXPGPATH, fmt, args);
-	va_end(args);
+	written = strlcpy(cmd, SYSTEMQUOTE, strlen(SYSTEMQUOTE));
+	va_start(ap, fmt);
+	written += vsnprintf(cmd + written, MAXCMDLEN - written, fmt, ap);
+	va_end(ap);
+	if (written >= MAXCMDLEN)
+		pg_log(PG_FATAL, "command too long\n");
+	written += snprintf(cmd + written, MAXCMDLEN - written,
+						" >> \"%s\" 2>&1" SYSTEMQUOTE, log_file);
+	if (written >= MAXCMDLEN)
+		pg_log(PG_FATAL, "command too long\n");
 
 	if ((log = fopen_priv(log_file, "a+")) == NULL)
 		pg_log(PG_FATAL, "cannot write to log file %s\n", log_file);
 	pg_log(PG_VERBOSE, "%s\n", cmd);
 	fprintf(log, "command: %s\n", cmd);
 
 	/*
-	 * In Windows, we must close then reopen the log file so the file is
-	 * not open while the command is running, or we get a share violation.
+	 * In Windows, we must close the log file at this point so the file is not
+	 * open while the command is running, or we get a share violation.
 	 */
 	fclose(log);
 
 	result = system(cmd);
 
-	if (is_priv)
-		umask(old_umask);
+	umask(old_umask);
 
 	if (result != 0)
 	{
-		char		opt_string[MAXPGPATH];
-
-		/* Create string for optional second log file */
-		if (opt_log_file)
-			snprintf(opt_string, sizeof(opt_string), " or \"%s\"", opt_log_file);
-		else
-			opt_string[0] = '\0';
-
 		report_status(PG_REPORT, "*failure*");
 		fflush(stdout);
 		pg_log(PG_VERBOSE, "There were problems executing \"%s\"\n", cmd);
-		pg_log(throw_error ? PG_FATAL : PG_REPORT,
-			   "Consult the last few lines of \"%s\"%s for\n"
-			   "the probable cause of the failure.\n",
-			   log_file, opt_string);
-		retval = 1;
+		if (opt_log_file)
+			pg_log(throw_error ? PG_FATAL : PG_REPORT,
+				   "Consult the last few lines of \"%s\" or \"%s\" for\n"
+				   "the probable cause of the failure.\n",
+				   log_file, opt_log_file);
+		else
+			pg_log(throw_error ? PG_FATAL : PG_REPORT,
+				   "Consult the last few lines of \"%s\" for\n"
+				   "the probable cause of the failure.\n",
+				   log_file);
 	}
-	else
-		retval = 0;
 
 	if ((log = fopen_priv(log_file, "a+")) == NULL)
 		pg_log(PG_FATAL, "cannot write to log file %s\n", log_file);
 	fprintf(log, "\n\n");
 	fclose(log);
 
-	return retval;
+	return result == 0;
 }
 
@@ -103,10 +103,10 @@ copyAndUpdateFile(pageCnvCtx *pageConverter,
 /*
  * linkAndUpdateFile()
  *
- * Creates a symbolic link between the given relation files. We use
+ * Creates a hard link between the given relation files. We use
  * this function to perform a true in-place update. If the on-disk
  * format of the new cluster is bit-for-bit compatible with the on-disk
- * format of the old cluster, we can simply symlink each relation
+ * format of the old cluster, we can simply link each relation
  * instead of copying the data from the old cluster to the new cluster.
  */
 const char *
@@ -9,6 +9,8 @@
 
 #include "postgres.h"
 
+#include "miscadmin.h"
+
 #include "pg_upgrade.h"
 
 #include <getopt_long.h>
@@ -376,3 +378,64 @@ adjust_data_dir(ClusterInfo *cluster)
 
 	check_ok();
 }
+
+
+/*
+ * get_sock_dir
+ *
+ * Identify the socket directory to use for this cluster. If we're doing
+ * a live check (old cluster only), we need to find out where the postmaster
+ * is listening. Otherwise, we're going to put the socket into the current
+ * directory.
+ */
+void
+get_sock_dir(ClusterInfo *cluster, bool live_check)
+{
+#ifdef HAVE_UNIX_SOCKETS
+	if (!live_check)
+	{
+		/* Use the current directory for the socket */
+		cluster->sockdir = pg_malloc(MAXPGPATH);
+		if (!getcwd(cluster->sockdir, MAXPGPATH))
+			pg_log(PG_FATAL, "cannot find current directory\n");
+	}
+	else
+	{
+		/*
+		 * If we are doing a live check, we will use the old cluster's Unix
+		 * domain socket directory so we can connect to the live server.
+		 */
+
+		/* sockdir was added to postmaster.pid in PG 9.1 */
+		if (GET_MAJOR_VERSION(cluster->major_version) >= 901)
+		{
+			char		filename[MAXPGPATH];
+			FILE	   *fp;
+			int			i;
+
+			snprintf(filename, sizeof(filename), "%s/postmaster.pid",
+					 cluster->pgdata);
+			if ((fp = fopen(filename, "r")) == NULL)
+				pg_log(PG_FATAL, "Could not get socket directory of the old server\n");
+
+			cluster->sockdir = pg_malloc(MAXPGPATH);
+			for (i = 0; i < LOCK_FILE_LINE_SOCKET_DIR; i++)
+				if (fgets(cluster->sockdir, MAXPGPATH, fp) == NULL)
+					pg_log(PG_FATAL, "Could not get socket directory of the old server\n");
+
+			fclose(fp);
+
+			/* Remove trailing newline */
+			if (strchr(cluster->sockdir, '\n') != NULL)
+				*strchr(cluster->sockdir, '\n') = '\0';
+		}
+		else
+		{
+			/* Can't get live sockdir, so assume the default is OK. */
+			cluster->sockdir = NULL;
+		}
+	}
+#else							/* !HAVE_UNIX_SOCKETS */
+	cluster->sockdir = NULL;
+#endif
+}
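For orientation only (this excerpts the server.c changes further down in this diff, it is not extra code from the commit): the sockdir chosen here is consumed in two places, so pg_upgrade's own libpq connections and the postmaster it starts agree on the socket directory.

/* get_db_conn(): point libpq at the cluster's socket directory when known. */
if (cluster->sockdir)
	snprintf(conn_opts, sizeof(conn_opts),
			 "dbname = '%s' user = '%s' host = '%s' port = %d",
			 db_name, os_info.user, cluster->sockdir, cluster->port);

/* start_postmaster(): have the postmaster create its socket there, using the
 * GUC name that matches the server version. */
if (cluster->sockdir)
	snprintf(socket_string + strlen(socket_string),
			 sizeof(socket_string) - strlen(socket_string),
			 " -c %s='%s'",
			 (GET_MAJOR_VERSION(cluster->major_version) < 903) ?
			 "unix_socket_directory" : "unix_socket_directories",
			 cluster->sockdir);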
@@ -88,6 +88,9 @@ main(int argc, char **argv)
 	check_cluster_versions();
 	check_cluster_compatibility(live_check);
 
+	get_sock_dir(&old_cluster, live_check);
+	get_sock_dir(&new_cluster, false);
+
 	check_old_cluster(live_check, &sequence_script_file_name);
 
 
@@ -140,11 +143,10 @@ main(int argc, char **argv)
 	 * because there is no need to have the schema load use new oids.
 	 */
 	prep_status("Setting next OID for new cluster");
-	exec_prog(true, true, UTILITY_LOG_FILE, NULL,
-			  SYSTEMQUOTE "\"%s/pg_resetxlog\" -o %u \"%s\" >> \"%s\" 2>&1"
-			  SYSTEMQUOTE,
+	exec_prog(UTILITY_LOG_FILE, NULL, true,
+			  "\"%s/pg_resetxlog\" -o %u \"%s\"",
 			  new_cluster.bindir, old_cluster.controldata.chkpnt_nxtoid,
-			  new_cluster.pgdata, UTILITY_LOG_FILE);
+			  new_cluster.pgdata);
 	check_ok();
 
 	create_script_for_cluster_analyze(&analyze_script_file_name);
@@ -211,11 +213,10 @@ prepare_new_cluster(void)
 	 * --analyze so autovacuum doesn't update statistics later
 	 */
 	prep_status("Analyzing all rows in the new cluster");
-	exec_prog(true, true, UTILITY_LOG_FILE, NULL,
-			  SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" "
-			  "--all --analyze %s >> \"%s\" 2>&1" SYSTEMQUOTE,
-			  new_cluster.bindir, new_cluster.port, os_info.user,
-			  log_opts.verbose ? "--verbose" : "", UTILITY_LOG_FILE);
+	exec_prog(UTILITY_LOG_FILE, NULL, true,
+			  "\"%s/vacuumdb\" %s --all --analyze %s",
+			  new_cluster.bindir, cluster_conn_opts(&new_cluster),
+			  log_opts.verbose ? "--verbose" : "");
 	check_ok();
 
 	/*
@@ -225,11 +226,10 @@ prepare_new_cluster(void)
 	 * later.
 	 */
 	prep_status("Freezing all rows on the new cluster");
-	exec_prog(true, true, UTILITY_LOG_FILE, NULL,
-			  SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" "
-			  "--all --freeze %s >> \"%s\" 2>&1" SYSTEMQUOTE,
-			  new_cluster.bindir, new_cluster.port, os_info.user,
-			  log_opts.verbose ? "--verbose" : "", UTILITY_LOG_FILE);
+	exec_prog(UTILITY_LOG_FILE, NULL, true,
+			  "\"%s/vacuumdb\" %s --all --freeze %s",
+			  new_cluster.bindir, cluster_conn_opts(&new_cluster),
+			  log_opts.verbose ? "--verbose" : "");
 	check_ok();
 
 	get_pg_database_relfilenode(&new_cluster);
@@ -263,14 +263,10 @@ prepare_new_databases(void)
 	 * support functions in template1 but pg_dumpall creates database using
 	 * the template0 template.
 	 */
-	exec_prog(true, true, RESTORE_LOG_FILE, NULL,
-			  SYSTEMQUOTE "\"%s/psql\" --echo-queries "
-			  "--set ON_ERROR_STOP=on "
-	/* --no-psqlrc prevents AUTOCOMMIT=off */
-			  "--no-psqlrc --port %d --username \"%s\" "
-			  "-f \"%s\" --dbname template1 >> \"%s\" 2>&1" SYSTEMQUOTE,
-			  new_cluster.bindir, new_cluster.port, os_info.user,
-			  GLOBALS_DUMP_FILE, RESTORE_LOG_FILE);
+	exec_prog(RESTORE_LOG_FILE, NULL, true,
+			  "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
+			  new_cluster.bindir, cluster_conn_opts(&new_cluster),
+			  GLOBALS_DUMP_FILE);
 	check_ok();
 
 	/* we load this to get a current list of databases */
@@ -296,13 +292,10 @@ create_new_objects(void)
 	check_ok();
 
 	prep_status("Restoring database schema to new cluster");
-	exec_prog(true, true, RESTORE_LOG_FILE, NULL,
-			  SYSTEMQUOTE "\"%s/psql\" --echo-queries "
-			  "--set ON_ERROR_STOP=on "
-			  "--no-psqlrc --port %d --username \"%s\" "
-			  "-f \"%s\" --dbname template1 >> \"%s\" 2>&1" SYSTEMQUOTE,
-			  new_cluster.bindir, new_cluster.port, os_info.user,
-			  DB_DUMP_FILE, RESTORE_LOG_FILE);
+	exec_prog(RESTORE_LOG_FILE, NULL, true,
+			  "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
+			  new_cluster.bindir, cluster_conn_opts(&new_cluster),
+			  DB_DUMP_FILE);
 	check_ok();
 
 	/* regenerate now that we have objects in the databases */
@@ -311,55 +304,60 @@ create_new_objects(void)
 	uninstall_support_functions_from_new_cluster();
 }
 
+/*
+ * Delete the given subdirectory contents from the new cluster, and copy the
+ * files from the old cluster into it.
+ */
+static void
+copy_subdir_files(char *subdir)
+{
+	char		old_path[MAXPGPATH];
+	char		new_path[MAXPGPATH];
+
+	prep_status("Deleting files from new %s", subdir);
+
+	snprintf(old_path, sizeof(old_path), "%s/%s", old_cluster.pgdata, subdir);
+	snprintf(new_path, sizeof(new_path), "%s/%s", new_cluster.pgdata, subdir);
+	if (!rmtree(new_path, true))
+		pg_log(PG_FATAL, "could not delete directory \"%s\"\n", new_path);
+	check_ok();
+
+	prep_status("Copying old %s to new server", subdir);
+
+	exec_prog(UTILITY_LOG_FILE, NULL, true,
+#ifndef WIN32
+			  "cp -Rf \"%s\" \"%s\"",
+#else
+	/* flags: everything, no confirm, quiet, overwrite read-only */
+			  "xcopy /e /y /q /r \"%s\" \"%s\\\"",
+#endif
+			  old_path, new_path);
+
+	check_ok();
+}
+
 
 static void
 copy_clog_xlog_xid(void)
 {
-	char		old_clog_path[MAXPGPATH];
-	char		new_clog_path[MAXPGPATH];
-
 	/* copy old commit logs to new data dir */
-	prep_status("Deleting new commit clogs");
-
-	snprintf(old_clog_path, sizeof(old_clog_path), "%s/pg_clog", old_cluster.pgdata);
-	snprintf(new_clog_path, sizeof(new_clog_path), "%s/pg_clog", new_cluster.pgdata);
-	if (!rmtree(new_clog_path, true))
-		pg_log(PG_FATAL, "could not delete directory \"%s\"\n", new_clog_path);
-	check_ok();
-
-	prep_status("Copying old commit clogs to new server");
-	exec_prog(true, false, UTILITY_LOG_FILE, NULL,
-#ifndef WIN32
-			  SYSTEMQUOTE "%s \"%s\" \"%s\" >> \"%s\" 2>&1" SYSTEMQUOTE,
-			  "cp -Rf",
-#else
-	/* flags: everything, no confirm, quiet, overwrite read-only */
-			  SYSTEMQUOTE "%s \"%s\" \"%s\\\" >> \"%s\" 2>&1" SYSTEMQUOTE,
-			  "xcopy /e /y /q /r",
-#endif
-			  old_clog_path, new_clog_path, UTILITY_LOG_FILE);
-	check_ok();
+	copy_subdir_files("pg_clog");
 
 	/* set the next transaction id of the new cluster */
 	prep_status("Setting next transaction ID for new cluster");
-	exec_prog(true, true, UTILITY_LOG_FILE, NULL,
-			  SYSTEMQUOTE
-			  "\"%s/pg_resetxlog\" -f -x %u \"%s\" >> \"%s\" 2>&1"
-			  SYSTEMQUOTE, new_cluster.bindir,
-			  old_cluster.controldata.chkpnt_nxtxid,
-			  new_cluster.pgdata, UTILITY_LOG_FILE);
+	exec_prog(UTILITY_LOG_FILE, NULL, true,
+			  "\"%s/pg_resetxlog\" -f -x %u \"%s\"",
+			  new_cluster.bindir, old_cluster.controldata.chkpnt_nxtxid,
+			  new_cluster.pgdata);
 	check_ok();
 
 	/* now reset the wal archives in the new cluster */
 	prep_status("Resetting WAL archives");
-	exec_prog(true, true, UTILITY_LOG_FILE, NULL,
-			  SYSTEMQUOTE
-			  "\"%s/pg_resetxlog\" -l %u,%u,%u \"%s\" >> \"%s\" 2>&1"
-			  SYSTEMQUOTE, new_cluster.bindir,
+	exec_prog(UTILITY_LOG_FILE, NULL, true,
+			  "\"%s/pg_resetxlog\" -l %u,%u,%u \"%s\"", new_cluster.bindir,
 			  old_cluster.controldata.chkpnt_tli,
 			  old_cluster.controldata.logid,
 			  old_cluster.controldata.nxtlogseg,
-			  new_cluster.pgdata, UTILITY_LOG_FILE);
+			  new_cluster.pgdata);
 	check_ok();
 }
 
@@ -227,6 +227,7 @@ typedef struct
 	char	   *bindir;			/* pathname for cluster's executable directory */
 	char	   *pgopts;			/* options to pass to the server, like pg_ctl
 								 * -o */
+	char	   *sockdir;		/* directory for Unix Domain socket, if any */
 	unsigned short port;		/* port number where postmaster is waiting */
 	uint32		major_version;	/* PG_VERSION of cluster */
 	char		major_version_str[64];	/* string PG_VERSION of cluster */
@@ -317,10 +318,11 @@ void split_old_dump(void);
 
 /* exec.c */
 
-int
-exec_prog(bool throw_error, bool is_priv, const char *log_file,
-		  const char *opt_log_file, const char *cmd,...)
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 5, 6)));
+#define EXEC_PSQL_ARGS "--echo-queries --set ON_ERROR_STOP=on --no-psqlrc --dbname=template1"
+bool
+exec_prog(const char *log_file, const char *opt_log_file,
+		  bool throw_error, const char *fmt,...)
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 4, 5)));
 void		verify_directories(void);
 bool		is_server_running(const char *datadir);
 
@@ -387,6 +389,7 @@ void print_maps(FileNameMap *maps, int n,
 
 void		parseCommandLine(int argc, char *argv[]);
 void		adjust_data_dir(ClusterInfo *cluster);
+void		get_sock_dir(ClusterInfo *cluster, bool live_check);
 
 /* relfilenode.c */
 
@@ -407,6 +410,8 @@ PGresult *
 executeQueryOrDie(PGconn *conn, const char *fmt,...)
 __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
 
+char	   *cluster_conn_opts(ClusterInfo *cluster);
+
 void		start_postmaster(ClusterInfo *cluster);
 void		stop_postmaster(bool fast);
 uint32		get_major_server_version(ClusterInfo *cluster);
@@ -46,21 +46,54 @@ connectToServer(ClusterInfo *cluster, const char *db_name)
 /*
  * get_db_conn()
  *
- * get database connection
+ * get database connection, using named database + standard params for cluster
  */
 static PGconn *
 get_db_conn(ClusterInfo *cluster, const char *db_name)
 {
-	char		conn_opts[MAXPGPATH];
+	char		conn_opts[2 * NAMEDATALEN + MAXPGPATH + 100];
 
-	snprintf(conn_opts, sizeof(conn_opts),
-			 "dbname = '%s' user = '%s' port = %d", db_name, os_info.user,
-			 cluster->port);
+	if (cluster->sockdir)
+		snprintf(conn_opts, sizeof(conn_opts),
+				 "dbname = '%s' user = '%s' host = '%s' port = %d",
+				 db_name, os_info.user, cluster->sockdir, cluster->port);
+	else
+		snprintf(conn_opts, sizeof(conn_opts),
+				 "dbname = '%s' user = '%s' port = %d",
+				 db_name, os_info.user, cluster->port);
 
 	return PQconnectdb(conn_opts);
 }
 
 
+/*
+ * cluster_conn_opts()
+ *
+ * Return standard command-line options for connecting to this cluster when
+ * using psql, pg_dump, etc. Ideally this would match what get_db_conn()
+ * sets, but the utilities we need aren't very consistent about the treatment
+ * of database name options, so we leave that out.
+ *
+ * Note result is in static storage, so use it right away.
+ */
+char *
+cluster_conn_opts(ClusterInfo *cluster)
+{
+	static char conn_opts[MAXPGPATH + NAMEDATALEN + 100];
+
+	if (cluster->sockdir)
+		snprintf(conn_opts, sizeof(conn_opts),
+				 "--host \"%s\" --port %d --username \"%s\"",
+				 cluster->sockdir, cluster->port, os_info.user);
+	else
+		snprintf(conn_opts, sizeof(conn_opts),
+				 "--port %d --username \"%s\"",
+				 cluster->port, os_info.user);
+
+	return conn_opts;
+}
+
+
 /*
  * executeQueryOrDie()
  *
@@ -140,10 +173,11 @@ stop_postmaster_atexit(void)
 void
 start_postmaster(ClusterInfo *cluster)
 {
-	char		cmd[MAXPGPATH];
+	char		cmd[MAXPGPATH * 4 + 1000];
 	PGconn	   *conn;
 	bool		exit_hook_registered = false;
-	int			pg_ctl_return = 0;
+	bool		pg_ctl_return = false;
+	char		socket_string[MAXPGPATH + 200];
 
 	if (!exit_hook_registered)
 	{
@@ -151,6 +185,23 @@ start_postmaster(ClusterInfo *cluster)
 		exit_hook_registered = true;
 	}
 
+	socket_string[0] = '\0';
+
+#ifdef HAVE_UNIX_SOCKETS
+	/* prevent TCP/IP connections, restrict socket access */
+	strcat(socket_string,
+		   " -c listen_addresses='' -c unix_socket_permissions=0700");
+
+	/* Have a sockdir? Tell the postmaster. */
+	if (cluster->sockdir)
+		snprintf(socket_string + strlen(socket_string),
+				 sizeof(socket_string) - strlen(socket_string),
+				 " -c %s='%s'",
+				 (GET_MAJOR_VERSION(cluster->major_version) < 903) ?
+				 "unix_socket_directory" : "unix_socket_directories",
+				 cluster->sockdir);
+#endif
+
 	/*
 	 * Using autovacuum=off disables cleanup vacuum and analyze, but freeze
 	 * vacuums can still happen, so we set autovacuum_freeze_max_age to its
@@ -159,22 +210,23 @@ start_postmaster(ClusterInfo *cluster)
 	 * not touch them.
 	 */
 	snprintf(cmd, sizeof(cmd),
-			 SYSTEMQUOTE "\"%s/pg_ctl\" -w -l \"%s\" -D \"%s\" "
-			 "-o \"-p %d %s %s\" start >> \"%s\" 2>&1" SYSTEMQUOTE,
+			 "\"%s/pg_ctl\" -w -l \"%s\" -D \"%s\" -o \"-p %d %s %s%s\" start",
 			 cluster->bindir, SERVER_LOG_FILE, cluster->pgconfig, cluster->port,
 			 (cluster->controldata.cat_ver >=
 			  BINARY_UPGRADE_SERVER_FLAG_CAT_VER) ? "-b" :
 			 "-c autovacuum=off -c autovacuum_freeze_max_age=2000000000",
-			 cluster->pgopts ? cluster->pgopts : "", SERVER_START_LOG_FILE);
+			 cluster->pgopts ? cluster->pgopts : "", socket_string);
 
 	/*
 	 * Don't throw an error right away, let connecting throw the error because
 	 * it might supply a reason for the failure.
 	 */
-	pg_ctl_return = exec_prog(false, true, SERVER_START_LOG_FILE,
-	/* pass both file names if the differ */
-							  (strcmp(SERVER_LOG_FILE, SERVER_START_LOG_FILE) != 0) ?
+	pg_ctl_return = exec_prog(SERVER_START_LOG_FILE,
+	/* pass both file names if they differ */
+							  (strcmp(SERVER_LOG_FILE,
+									  SERVER_START_LOG_FILE) != 0) ?
 							  SERVER_LOG_FILE : NULL,
+							  false,
 							  "%s", cmd);
 
 	/* Check to see if we can connect to the server; if not, report it. */
@@ -185,13 +237,14 @@ start_postmaster(ClusterInfo *cluster)
 				   PQerrorMessage(conn));
 		if (conn)
 			PQfinish(conn);
-		pg_log(PG_FATAL, "could not connect to %s postmaster started with the command: %s\n",
+		pg_log(PG_FATAL, "could not connect to %s postmaster started with the command:\n"
+			   "%s\n",
 			   CLUSTER_NAME(cluster), cmd);
 	}
 	PQfinish(conn);
 
 	/* If the connection didn't fail, fail now */
-	if (pg_ctl_return != 0)
+	if (!pg_ctl_return)
 		pg_log(PG_FATAL, "pg_ctl failed to start the %s server, or connection failed\n",
 			   CLUSTER_NAME(cluster));
 
@@ -202,7 +255,6 @@ start_postmaster(ClusterInfo *cluster)
 void
 stop_postmaster(bool fast)
 {
-	char		cmd[MAXPGPATH];
 	ClusterInfo *cluster;
 
 	if (os_info.running_cluster == &old_cluster)
@@ -212,14 +264,11 @@ stop_postmaster(bool fast)
 	else
 		return;					/* no cluster running */
 
-	snprintf(cmd, sizeof(cmd),
-			 SYSTEMQUOTE "\"%s/pg_ctl\" -w -D \"%s\" -o \"%s\" "
-			 "%s stop >> \"%s\" 2>&1" SYSTEMQUOTE,
-			 cluster->bindir, cluster->pgconfig,
-			 cluster->pgopts ? cluster->pgopts : "",
-			 fast ? "-m fast" : "", SERVER_STOP_LOG_FILE);
-
-	exec_prog(fast ? false : true, true, SERVER_STOP_LOG_FILE, NULL, "%s", cmd);
+	exec_prog(SERVER_STOP_LOG_FILE, NULL, !fast,
+			  "\"%s/pg_ctl\" -w -D \"%s\" -o \"%s\" %s stop",
+			  cluster->bindir, cluster->pgconfig,
+			  cluster->pgopts ? cluster->pgopts : "",
+			  fast ? "-m fast" : "");
 
 	os_info.running_cluster = NULL;
 }
@@ -107,6 +107,7 @@ initdb
 pg_upgrade -d "${PGDATA}.old" -D "${PGDATA}" -b "$oldbindir" -B "$bindir"
 
 pg_ctl start -l "$logdir/postmaster2.log" -w
+sh ./analyze_new_cluster.sh
 pg_dumpall >"$temp_root"/dump2.sql || pg_dumpall2_status=$?
 pg_ctl -m fast stop
 if [ -n "$pg_dumpall2_status" ]; then
@@ -114,6 +115,8 @@ if [ -n "$pg_dumpall2_status" ]; then
 	exit 1
 fi
 
+sh ./delete_old_cluster.sh
+
 if diff -q "$temp_root"/dump1.sql "$temp_root"/dump2.sql; then
 	echo PASSED
 	exit 0
@@ -367,8 +367,10 @@ pg_upgrade.exe
     Obviously, no one should be accessing the clusters during the
     upgrade. <application>pg_upgrade</> defaults to running servers
     on port 50432 to avoid unintended client connections.
-    You can use the same port numbers for both clusters because the
-    old and new clusters will not be running at the same time.
+    You can use the same port number for both clusters when doing an
+    upgrade because the old and new clusters will not be running at the
+    same time. However, when checking an old running server, the old
+    and new port numbers must be different.
    </para>
 
    <para>
@@ -517,6 +519,14 @@ psql --username postgres --file script.sql postgres
    <literal>-d /real-data-directory -o '-D /configuration-directory'</>.
    </para>
 
+   <para>
+    If doing <option>--check</> with a running old server of a pre-9.1 version,
+    and the old server is using a Unix-domain socket directory that is
+    different from the default built into the new <productname>PostgreSQL</>
+    installation, set <envar>PGHOST</> to point to the socket location of the
+    old server. (This is not relevant on Windows.)
+   </para>
+
    <para>
     A Log-Shipping Standby Server (<xref linkend="warm-standby">) cannot
    be upgraded because the server must allow writes. The simplest way