Mirror of https://github.com/postgres/postgres.git (synced 2025-06-29 10:41:53 +03:00)
pgindent run for release 9.3
This is the first run of the Perl-based pgindent script. Also update pgindent instructions.
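The kind of change this run makes is visible throughout the diff below: comments are re-filled to the full line width, local declarations are re-aligned, struct braces are moved onto their own line, and spacing around casts and binary operators is normalized. The following condensed C fragment is only an illustration of that before/after style (it echoes lines that appear in the parallel.c hunks below, but is not a literal excerpt of the commit):

    /* Before the Perl-based pgindent run */
    typedef struct {
        char log_file[MAXPGPATH];
        char cmd[MAX_STRING];
    } exec_thread_arg;
    cur_thread_args = (void **)exec_thread_args;
    new_arg = exec_thread_args[parallel_jobs-1];

    /* After the run: brace on its own line, declarations re-aligned,
     * a space after the cast and around the '-' operator */
    typedef struct
    {
        char        log_file[MAXPGPATH];
        char        cmd[MAX_STRING];
    } exec_thread_arg;
    cur_thread_args = (void **) exec_thread_args;
    new_arg = exec_thread_args[parallel_jobs - 1];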
@@ -28,7 +28,7 @@ static char *get_canonical_locale_name(int category, const char *locale);
 * fix_path_separator
 * For non-Windows, just return the argument.
 * For Windows convert any forward slash to a backslash
-* such as is suitable for arguments to builtin commands
+* such as is suitable for arguments to builtin commands
 * like RMDIR and DEL.
 */
 static char *
@@ -36,8 +36,8 @@ fix_path_separator(char *path)
 {
 #ifdef WIN32

-char *result;
-char *c;
+char *result;
+char *c;

 result = pg_strdup(path);

@@ -46,11 +46,9 @@ fix_path_separator(char *path)
 *c = '\\';

 return result;

 #else

 return path;

 #endif
 }

@@ -156,21 +154,21 @@ check_new_cluster(void)
 check_is_super_user(&new_cluster);

 /*
-* We don't restore our own user, so both clusters must match have
-* matching install-user oids.
+* We don't restore our own user, so both clusters must match have
+* matching install-user oids.
 */
 if (old_cluster.install_role_oid != new_cluster.install_role_oid)
 pg_log(PG_FATAL,
-"Old and new cluster install users have different values for pg_authid.oid.\n");
+"Old and new cluster install users have different values for pg_authid.oid.\n");

 /*
-* We only allow the install user in the new cluster because other
-* defined users might match users defined in the old cluster and
-* generate an error during pg_dump restore.
+* We only allow the install user in the new cluster because other defined
+* users might match users defined in the old cluster and generate an
+* error during pg_dump restore.
 */
 if (new_cluster.role_count != 1)
 pg_log(PG_FATAL, "Only the install user can be defined in the new cluster.\n");

 check_for_prepared_transactions(&new_cluster);
 }

@@ -247,14 +245,14 @@ output_completion_banner(char *analyze_script_file_name,

 if (deletion_script_file_name)
 pg_log(PG_REPORT,
-"Running this script will delete the old cluster's data files:\n"
+"Running this script will delete the old cluster's data files:\n"
 " %s\n",
 deletion_script_file_name);
 else
 pg_log(PG_REPORT,
 "Could not create a script to delete the old cluster's data\n"
-"files because user-defined tablespaces exist in the old cluster\n"
-"directory. The old cluster's contents must be deleted manually.\n");
+"files because user-defined tablespaces exist in the old cluster\n"
+"directory. The old cluster's contents must be deleted manually.\n");
 }

@@ -323,8 +321,8 @@ check_cluster_compatibility(bool live_check)
 /* We read the real port number for PG >= 9.1 */
 if (live_check && GET_MAJOR_VERSION(old_cluster.major_version) < 901 &&
 old_cluster.port == DEF_PGUPORT)
-pg_log(PG_FATAL, "When checking a pre-PG 9.1 live old server, "
-"you must specify the old server's port number.\n");
+pg_log(PG_FATAL, "When checking a pre-PG 9.1 live old server, "
+"you must specify the old server's port number.\n");

 if (live_check && old_cluster.port == new_cluster.port)
 pg_log(PG_FATAL, "When checking a live server, "
@@ -366,18 +364,18 @@ set_locale_and_encoding(ClusterInfo *cluster)
 if (GET_MAJOR_VERSION(cluster->major_version) < 902)
 {
 /*
-* Pre-9.2 did not canonicalize the supplied locale names
-* to match what the system returns, while 9.2+ does, so
-* convert pre-9.2 to match.
+* Pre-9.2 did not canonicalize the supplied locale names to match
+* what the system returns, while 9.2+ does, so convert pre-9.2 to
+* match.
 */
 ctrl->lc_collate = get_canonical_locale_name(LC_COLLATE,
-pg_strdup(PQgetvalue(res, 0, i_datcollate)));
+pg_strdup(PQgetvalue(res, 0, i_datcollate)));
 ctrl->lc_ctype = get_canonical_locale_name(LC_CTYPE,
-pg_strdup(PQgetvalue(res, 0, i_datctype)));
-}
+pg_strdup(PQgetvalue(res, 0, i_datctype)));
+}
 else
 {
-ctrl->lc_collate = pg_strdup(PQgetvalue(res, 0, i_datcollate));
+ctrl->lc_collate = pg_strdup(PQgetvalue(res, 0, i_datcollate));
 ctrl->lc_ctype = pg_strdup(PQgetvalue(res, 0, i_datctype));
 }

@@ -410,21 +408,21 @@ check_locale_and_encoding(ControlData *oldctrl,
 ControlData *newctrl)
 {
 /*
-* These are often defined with inconsistent case, so use pg_strcasecmp().
-* They also often use inconsistent hyphenation, which we cannot fix, e.g.
-* UTF-8 vs. UTF8, so at least we display the mismatching values.
+* These are often defined with inconsistent case, so use pg_strcasecmp().
+* They also often use inconsistent hyphenation, which we cannot fix, e.g.
+* UTF-8 vs. UTF8, so at least we display the mismatching values.
 */
 if (pg_strcasecmp(oldctrl->lc_collate, newctrl->lc_collate) != 0)
 pg_log(PG_FATAL,
-"lc_collate cluster values do not match: old \"%s\", new \"%s\"\n",
+"lc_collate cluster values do not match: old \"%s\", new \"%s\"\n",
 oldctrl->lc_collate, newctrl->lc_collate);
 if (pg_strcasecmp(oldctrl->lc_ctype, newctrl->lc_ctype) != 0)
 pg_log(PG_FATAL,
-"lc_ctype cluster values do not match: old \"%s\", new \"%s\"\n",
+"lc_ctype cluster values do not match: old \"%s\", new \"%s\"\n",
 oldctrl->lc_ctype, newctrl->lc_ctype);
 if (pg_strcasecmp(oldctrl->encoding, newctrl->encoding) != 0)
 pg_log(PG_FATAL,
-"encoding cluster values do not match: old \"%s\", new \"%s\"\n",
+"encoding cluster values do not match: old \"%s\", new \"%s\"\n",
 oldctrl->encoding, newctrl->encoding);
 }

@@ -597,16 +595,16 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
 SCRIPT_EXT);

 /*
-* Some users (oddly) create tablespaces inside the cluster data
-* directory. We can't create a proper old cluster delete script
-* in that case.
+* Some users (oddly) create tablespaces inside the cluster data
+* directory. We can't create a proper old cluster delete script in that
+* case.
 */
 strlcpy(old_cluster_pgdata, old_cluster.pgdata, MAXPGPATH);
 canonicalize_path(old_cluster_pgdata);
 for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
 {
 char old_tablespace_dir[MAXPGPATH];

 strlcpy(old_tablespace_dir, os_info.old_tablespaces[tblnum], MAXPGPATH);
 canonicalize_path(old_tablespace_dir);
 if (path_is_prefix_of_path(old_cluster_pgdata, old_tablespace_dir))
@@ -649,7 +647,7 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
 /* remove PG_VERSION? */
 if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804)
 fprintf(script, RM_CMD " %s%s%cPG_VERSION\n",
-fix_path_separator(os_info.old_tablespaces[tblnum]),
+fix_path_separator(os_info.old_tablespaces[tblnum]),
 fix_path_separator(old_cluster.tablespace_suffix),
 PATH_SEPARATOR);

@@ -668,7 +666,7 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
 * or a version-specific subdirectory.
 */
 fprintf(script, RMDIR_CMD " %s%s\n",
-fix_path_separator(os_info.old_tablespaces[tblnum]),
+fix_path_separator(os_info.old_tablespaces[tblnum]),
 fix_path_separator(old_cluster.tablespace_suffix));
 }

@@ -997,7 +995,7 @@ get_canonical_locale_name(int category, const char *locale)

 save = setlocale(category, NULL);
 if (!save)
-pg_log(PG_FATAL, "failed to get the current locale\n");
+pg_log(PG_FATAL, "failed to get the current locale\n");

 /* 'save' may be pointing at a modifiable scratch variable, so copy it. */
 save = pg_strdup(save);
@@ -1006,13 +1004,13 @@ get_canonical_locale_name(int category, const char *locale)
 res = setlocale(category, locale);

 if (!res)
-pg_log(PG_FATAL, "failed to get system local name for \"%s\"\n", res);
+pg_log(PG_FATAL, "failed to get system local name for \"%s\"\n", res);

 res = pg_strdup(res);

 /* restore old value. */
 if (!setlocale(category, save))
-pg_log(PG_FATAL, "failed to restore old locale \"%s\"\n", save);
+pg_log(PG_FATAL, "failed to restore old locale \"%s\"\n", save);

 pg_free(save);

@@ -472,10 +472,10 @@ get_control_data(ClusterInfo *cluster, bool live_check)
 pg_free(lc_messages);

 /*
-* Before 9.3, pg_resetxlog reported the xlogid and segno of the first
-* log file after reset as separate lines. Starting with 9.3, it reports
-* the WAL file name. If the old cluster is older than 9.3, we construct
-* the WAL file name from the xlogid and segno.
+* Before 9.3, pg_resetxlog reported the xlogid and segno of the first log
+* file after reset as separate lines. Starting with 9.3, it reports the
+* WAL file name. If the old cluster is older than 9.3, we construct the
+* WAL file name from the xlogid and segno.
 */
 if (GET_MAJOR_VERSION(cluster->major_version) <= 902)
 {
@@ -499,8 +499,8 @@ get_control_data(ClusterInfo *cluster, bool live_check)
 !got_date_is_int || !got_float8_pass_by_value || !got_data_checksum_version)
 {
 pg_log(PG_REPORT,
-"The %s cluster lacks some required control information:\n",
-CLUSTER_NAME(cluster));
+"The %s cluster lacks some required control information:\n",
+CLUSTER_NAME(cluster));

 if (!got_xid)
 pg_log(PG_REPORT, " checkpoint next XID\n");
@@ -576,7 +576,7 @@ check_control_data(ControlData *oldctrl,
 {
 if (oldctrl->align == 0 || oldctrl->align != newctrl->align)
 pg_log(PG_FATAL,
-"old and new pg_controldata alignments are invalid or do not match\n"
+"old and new pg_controldata alignments are invalid or do not match\n"
 "Likely one cluster is a 32-bit install, the other 64-bit\n");

 if (oldctrl->blocksz == 0 || oldctrl->blocksz != newctrl->blocksz)
@@ -621,7 +621,10 @@ check_control_data(ControlData *oldctrl,
 "options.\n");
 }

-/* We might eventually allow upgrades from checksum to no-checksum clusters. */
+/*
+* We might eventually allow upgrades from checksum to no-checksum
+* clusters.
+*/
 if (oldctrl->data_checksum_version != newctrl->data_checksum_version)
 {
 pg_log(PG_FATAL,
@@ -44,6 +44,7 @@ exec_prog(const char *log_file, const char *opt_log_file,
 {
 int result;
 int written;

 #define MAXCMDLEN (2 * MAXPGPATH)
 char cmd[MAXCMDLEN];
 mode_t old_umask = 0;
@@ -67,15 +68,15 @@ exec_prog(const char *log_file, const char *opt_log_file,

 #ifdef WIN32
 {
-/*
-* "pg_ctl -w stop" might have reported that the server has
-* stopped because the postmaster.pid file has been removed,
-* but "pg_ctl -w start" might still be in the process of
-* closing and might still be holding its stdout and -l log
-* file descriptors open. Therefore, try to open the log
-* file a few more times.
+/*
+* "pg_ctl -w stop" might have reported that the server has stopped
+* because the postmaster.pid file has been removed, but "pg_ctl -w
+* start" might still be in the process of closing and might still be
+* holding its stdout and -l log file descriptors open. Therefore,
+* try to open the log file a few more times.
 */
-int iter;
+int iter;

 for (iter = 0; iter < 4 && log == NULL; iter++)
 {
 sleep(1);
@@ -122,12 +123,13 @@ exec_prog(const char *log_file, const char *opt_log_file,
 }

 #ifndef WIN32
-/*
-* We can't do this on Windows because it will keep the "pg_ctl start"
-* output filename open until the server stops, so we do the \n\n above
-* on that platform. We use a unique filename for "pg_ctl start" that is
-* never reused while the server is running, so it works fine. We could
-* log these commands to a third file, but that just adds complexity.
+
+/*
+* We can't do this on Windows because it will keep the "pg_ctl start"
+* output filename open until the server stops, so we do the \n\n above on
+* that platform. We use a unique filename for "pg_ctl start" that is
+* never reused while the server is running, so it works fine. We could
+* log these commands to a third file, but that just adds complexity.
 */
 if ((log = fopen_priv(log_file, "a")) == NULL)
 pg_log(PG_FATAL, "cannot write to log file %s\n", log_file);
@@ -178,7 +180,6 @@ pid_lock_file_exists(const char *datadir)
 void
 verify_directories(void)
 {

 #ifndef WIN32
 if (access(".", R_OK | W_OK | X_OK) != 0)
 #else
@@ -127,14 +127,13 @@ linkAndUpdateFile(pageCnvCtx *pageConverter,
 static int
 copy_file(const char *srcfile, const char *dstfile, bool force)
 {

 #define COPY_BUF_SIZE (50 * BLCKSZ)

 int src_fd;
 int dest_fd;
 char *buffer;
 int ret = 0;
-int save_errno = 0;
+int save_errno = 0;

 if ((srcfile == NULL) || (dstfile == NULL))
 return -1;
@@ -60,10 +60,9 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
 * table names change during ALTER TABLE ALTER COLUMN SET TYPE. In >=
 * 9.0, TOAST relation names always use heap table oids, hence we
 * cannot check relation names when upgrading from pre-9.0. Clusters
-* upgraded to 9.0 will get matching TOAST names.
-* If index names don't match primary key constraint names, this will
-* fail because pg_dump dumps constraint names and pg_upgrade checks
-* index names.
+* upgraded to 9.0 will get matching TOAST names. If index names don't
+* match primary key constraint names, this will fail because pg_dump
+* dumps constraint names and pg_upgrade checks index names.
 */
 if (strcmp(old_rel->nspname, new_rel->nspname) != 0 ||
 ((GET_MAJOR_VERSION(old_cluster.major_version) >= 900 ||
@@ -79,7 +78,10 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
 num_maps++;
 }

-/* Do this check after the loop so hopefully we will produce a clearer error above */
+/*
+* Do this check after the loop so hopefully we will produce a clearer
+* error above
+*/
 if (old_db->rel_arr.nrels != new_db->rel_arr.nrels)
 pg_log(PG_FATAL, "old and new databases \"%s\" have a different number of relations\n",
 old_db->db_name);
@@ -285,8 +287,11 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
 "LEFT OUTER JOIN pg_catalog.pg_index i "
 " ON c.oid = i.indexrelid "
 "WHERE relkind IN ('r', 'm', 'i'%s) AND "
-/* pg_dump only dumps valid indexes; testing indisready is
-* necessary in 9.2, and harmless in earlier/later versions. */
+
+/*
+* pg_dump only dumps valid indexes; testing indisready is necessary in
+* 9.2, and harmless in earlier/later versions.
+*/
 " i.indisvalid IS DISTINCT FROM false AND "
 " i.indisready IS DISTINCT FROM false AND "
 /* exclude possible orphaned temp tables */
@@ -309,8 +314,8 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
 PQclear(executeQueryOrDie(conn, "%s", query));

 /*
-* Get TOAST tables and indexes; we have to gather the TOAST tables in
-* later steps because we can't schema-qualify TOAST tables.
+* Get TOAST tables and indexes; we have to gather the TOAST tables in
+* later steps because we can't schema-qualify TOAST tables.
 */
 PQclear(executeQueryOrDie(conn,
 "INSERT INTO info_rels "
@@ -335,8 +340,8 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
 /* we preserve pg_class.oid so we sort by it to match old/new */
 "ORDER BY 1;",
 /* 9.2 removed the spclocation column */
-(GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
-"t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation");
+(GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
+"t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation");

 res = executeQueryOrDie(conn, "%s", query);

@@ -437,5 +442,5 @@ print_rel_infos(RelInfoArr *rel_arr)
 for (relnum = 0; relnum < rel_arr->nrels; relnum++)
 pg_log(PG_VERBOSE, "relname: %s.%s: reloid: %u reltblspace: %s\n",
 rel_arr->rels[relnum].nspname, rel_arr->rels[relnum].relname,
-rel_arr->rels[relnum].reloid, rel_arr->rels[relnum].tablespace);
+rel_arr->rels[relnum].reloid, rel_arr->rels[relnum].tablespace);
 }
@@ -314,8 +314,8 @@ check_required_directory(char **dirpath, char **configpath,
 }

 /*
-* Trim off any trailing path separators because we construct paths
-* by appending to this path.
+* Trim off any trailing path separators because we construct paths by
+* appending to this path.
 */
 #ifndef WIN32
 if ((*dirpath)[strlen(*dirpath) - 1] == '/')
@@ -398,10 +398,10 @@ void
 get_sock_dir(ClusterInfo *cluster, bool live_check)
 {
 #ifdef HAVE_UNIX_SOCKETS

 /*
-* sockdir and port were added to postmaster.pid in PG 9.1.
-* Pre-9.1 cannot process pg_ctl -w for sockets in non-default
-* locations.
+* sockdir and port were added to postmaster.pid in PG 9.1. Pre-9.1 cannot
+* process pg_ctl -w for sockets in non-default locations.
 */
 if (GET_MAJOR_VERSION(cluster->major_version) >= 901)
 {
@@ -415,26 +415,28 @@ get_sock_dir(ClusterInfo *cluster, bool live_check)
 else
 {
 /*
-* If we are doing a live check, we will use the old cluster's Unix
-* domain socket directory so we can connect to the live server.
+* If we are doing a live check, we will use the old cluster's
+* Unix domain socket directory so we can connect to the live
+* server.
 */
 unsigned short orig_port = cluster->port;
-char filename[MAXPGPATH], line[MAXPGPATH];
-FILE *fp;
+char filename[MAXPGPATH],
+line[MAXPGPATH];
+FILE *fp;
 int lineno;

 snprintf(filename, sizeof(filename), "%s/postmaster.pid",
 cluster->pgdata);
 if ((fp = fopen(filename, "r")) == NULL)
 pg_log(PG_FATAL, "Cannot open file %s: %m\n", filename);

 for (lineno = 1;
-lineno <= Max(LOCK_FILE_LINE_PORT, LOCK_FILE_LINE_SOCKET_DIR);
+lineno <= Max(LOCK_FILE_LINE_PORT, LOCK_FILE_LINE_SOCKET_DIR);
 lineno++)
 {
 if (fgets(line, sizeof(line), fp) == NULL)
 pg_log(PG_FATAL, "Cannot read line %d from %s: %m\n", lineno, filename);

 /* potentially overwrite user-supplied value */
 if (lineno == LOCK_FILE_LINE_PORT)
 sscanf(line, "%hu", &old_cluster.port);
@@ -446,18 +448,21 @@ get_sock_dir(ClusterInfo *cluster, bool live_check)
 }
 }
 fclose(fp);

 /* warn of port number correction */
 if (orig_port != DEF_PGUPORT && old_cluster.port != orig_port)
 pg_log(PG_WARNING, "User-supplied old port number %hu corrected to %hu\n",
-orig_port, cluster->port);
+orig_port, cluster->port);
 }
 }
 else
-/* Can't get sockdir and pg_ctl -w can't use a non-default, use default */
-cluster->sockdir = NULL;

-#else /* !HAVE_UNIX_SOCKETS */
+/*
+* Can't get sockdir and pg_ctl -w can't use a non-default, use
+* default
+*/
+cluster->sockdir = NULL;
+#else /* !HAVE_UNIX_SOCKETS */
 cluster->sockdir = NULL;
 #endif
 }
@@ -59,11 +59,11 @@ setupPageConverter(void)
 if (newPageVersion != oldPageVersion)
 {
 /*
-* The clusters use differing page layouts, see if we can find a plugin
-* that knows how to convert from the old page layout to the new page
-* layout.
+* The clusters use differing page layouts, see if we can find a
+* plugin that knows how to convert from the old page layout to the
+* new page layout.
 */

 if ((converter = loadConverterPlugin(newPageVersion, oldPageVersion)) == NULL)
 pg_log(PG_FATAL, "could not find plugin to convert from old page layout to new page layout\n");

@@ -161,6 +161,4 @@ loadConverterPlugin(uint16 newPageVersion, uint16 oldPageVersion)
 }
 }

 #endif
@@ -20,7 +20,7 @@
 #include <io.h>
 #endif

-static int parallel_jobs;
+static int parallel_jobs;

 #ifdef WIN32
 /*
@@ -28,31 +28,32 @@ static int parallel_jobs;
 * it can be passed to WaitForMultipleObjects(). We use two arrays
 * so the thread_handles array can be passed to WaitForMultipleObjects().
 */
-HANDLE *thread_handles;
+HANDLE *thread_handles;

-typedef struct {
-char log_file[MAXPGPATH];
-char opt_log_file[MAXPGPATH];
-char cmd[MAX_STRING];
+typedef struct
+{
+char log_file[MAXPGPATH];
+char opt_log_file[MAXPGPATH];
+char cmd[MAX_STRING];
 } exec_thread_arg;

-typedef struct {
-DbInfoArr *old_db_arr;
-DbInfoArr *new_db_arr;
-char old_pgdata[MAXPGPATH];
-char new_pgdata[MAXPGPATH];
-char old_tablespace[MAXPGPATH];
+typedef struct
+{
+DbInfoArr *old_db_arr;
+DbInfoArr *new_db_arr;
+char old_pgdata[MAXPGPATH];
+char new_pgdata[MAXPGPATH];
+char old_tablespace[MAXPGPATH];
 } transfer_thread_arg;

 exec_thread_arg **exec_thread_args;
 transfer_thread_arg **transfer_thread_args;

 /* track current thread_args struct so reap_child() can be used for all cases */
-void **cur_thread_args;
-
-DWORD win32_exec_prog(exec_thread_arg *args);
-DWORD win32_transfer_all_new_dbs(transfer_thread_arg *args);
+void **cur_thread_args;
+
+DWORD win32_exec_prog(exec_thread_arg *args);
+DWORD win32_transfer_all_new_dbs(transfer_thread_arg *args);
 #endif

 /*
@@ -67,11 +68,12 @@ parallel_exec_prog(const char *log_file, const char *opt_log_file,
 {
 va_list args;
 char cmd[MAX_STRING];

 #ifndef WIN32
 pid_t child;
 #else
 HANDLE child;
-exec_thread_arg *new_arg;
+exec_thread_arg *new_arg;
 #endif

 va_start(args, fmt);
@@ -85,8 +87,8 @@ parallel_exec_prog(const char *log_file, const char *opt_log_file,
 {
 /* parallel */
 #ifdef WIN32
-cur_thread_args = (void **)exec_thread_args;
-#endif
+cur_thread_args = (void **) exec_thread_args;
+#endif
 /* harvest any dead children */
 while (reap_child(false) == true)
 ;
@@ -94,10 +96,10 @@ parallel_exec_prog(const char *log_file, const char *opt_log_file,
 /* must we wait for a dead child? */
 if (parallel_jobs >= user_opts.jobs)
 reap_child(true);

 /* set this before we start the job */
 parallel_jobs++;

 /* Ensure stdio state is quiesced before forking */
 fflush(NULL);

@@ -112,22 +114,22 @@ parallel_exec_prog(const char *log_file, const char *opt_log_file,
 #else
 if (thread_handles == NULL)
 {
-int i;
+int i;

 thread_handles = pg_malloc(user_opts.jobs * sizeof(HANDLE));
 exec_thread_args = pg_malloc(user_opts.jobs * sizeof(exec_thread_arg *));

 /*
-* For safety and performance, we keep the args allocated during
-* the entire life of the process, and we don't free the args
-* in a thread different from the one that allocated it.
+* For safety and performance, we keep the args allocated during
+* the entire life of the process, and we don't free the args in a
+* thread different from the one that allocated it.
 */
 for (i = 0; i < user_opts.jobs; i++)
 exec_thread_args[i] = pg_malloc(sizeof(exec_thread_arg));
 }

 /* use first empty array element */
-new_arg = exec_thread_args[parallel_jobs-1];
+new_arg = exec_thread_args[parallel_jobs - 1];

 /* Can only pass one pointer into the function, so use a struct */
 strcpy(new_arg->log_file, log_file);
@@ -135,11 +137,11 @@ parallel_exec_prog(const char *log_file, const char *opt_log_file,
 strcpy(new_arg->cmd, cmd);

 child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_exec_prog,
-new_arg, 0, NULL);
+new_arg, 0, NULL);
 if (child == 0)
 pg_log(PG_FATAL, "could not create worker thread: %s\n", strerror(errno));

-thread_handles[parallel_jobs-1] = child;
+thread_handles[parallel_jobs - 1] = child;
 #endif
 }

@@ -151,7 +153,7 @@ parallel_exec_prog(const char *log_file, const char *opt_log_file,
 DWORD
 win32_exec_prog(exec_thread_arg *args)
 {
-int ret;
+int ret;

 ret = !exec_prog(args->log_file, args->opt_log_file, true, "%s", args->cmd);

@@ -167,15 +169,16 @@ win32_exec_prog(exec_thread_arg *args)
 * This has the same API as transfer_all_new_dbs, except it does parallel execution
 * by transfering multiple tablespaces in parallel
 */
-void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
-char *old_pgdata, char *new_pgdata,
-char *old_tablespace)
+void
+parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
+char *old_pgdata, char *new_pgdata,
+char *old_tablespace)
 {
 #ifndef WIN32
 pid_t child;
 #else
 HANDLE child;
-transfer_thread_arg *new_arg;
+transfer_thread_arg *new_arg;
 #endif

 if (user_opts.jobs <= 1)
@@ -185,7 +188,7 @@ void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
 {
 /* parallel */
 #ifdef WIN32
-cur_thread_args = (void **)transfer_thread_args;
+cur_thread_args = (void **) transfer_thread_args;
 #endif
 /* harvest any dead children */
 while (reap_child(false) == true)
@@ -194,10 +197,10 @@ void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
 /* must we wait for a dead child? */
 if (parallel_jobs >= user_opts.jobs)
 reap_child(true);

 /* set this before we start the job */
 parallel_jobs++;

 /* Ensure stdio state is quiesced before forking */
 fflush(NULL);

@@ -217,22 +220,22 @@ void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
 #else
 if (thread_handles == NULL)
 {
-int i;
+int i;

 thread_handles = pg_malloc(user_opts.jobs * sizeof(HANDLE));
 transfer_thread_args = pg_malloc(user_opts.jobs * sizeof(transfer_thread_arg *));

 /*
-* For safety and performance, we keep the args allocated during
-* the entire life of the process, and we don't free the args
-* in a thread different from the one that allocated it.
+* For safety and performance, we keep the args allocated during
+* the entire life of the process, and we don't free the args in a
+* thread different from the one that allocated it.
 */
 for (i = 0; i < user_opts.jobs; i++)
 transfer_thread_args[i] = pg_malloc(sizeof(transfer_thread_arg));
 }

 /* use first empty array element */
-new_arg = transfer_thread_args[parallel_jobs-1];
+new_arg = transfer_thread_args[parallel_jobs - 1];

 /* Can only pass one pointer into the function, so use a struct */
 new_arg->old_db_arr = old_db_arr;
@@ -242,11 +245,11 @@ void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
 strcpy(new_arg->old_tablespace, old_tablespace);

 child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_exec_prog,
-new_arg, 0, NULL);
+new_arg, 0, NULL);
 if (child == 0)
 pg_log(PG_FATAL, "could not create worker thread: %s\n", strerror(errno));

-thread_handles[parallel_jobs-1] = child;
+thread_handles[parallel_jobs - 1] = child;
 #endif
 }

@@ -274,11 +277,11 @@ bool
 reap_child(bool wait_for_child)
 {
 #ifndef WIN32
-int work_status;
-int ret;
+int work_status;
+int ret;
 #else
-int thread_num;
-DWORD res;
+int thread_num;
+DWORD res;
 #endif

 if (user_opts.jobs <= 1 || parallel_jobs == 0)
@@ -293,18 +296,17 @@ reap_child(bool wait_for_child)

 if (WEXITSTATUS(work_status) != 0)
 pg_log(PG_FATAL, "child worker exited abnormally: %s\n", strerror(errno));

 #else
 /* wait for one to finish */
 thread_num = WaitForMultipleObjects(parallel_jobs, thread_handles,
-false, wait_for_child ? INFINITE : 0);
+false, wait_for_child ? INFINITE : 0);

 if (thread_num == WAIT_TIMEOUT || thread_num == WAIT_FAILED)
 return false;

 /* compute thread index in active_threads */
 thread_num -= WAIT_OBJECT_0;

 /* get the result */
 GetExitCodeThread(thread_handles[thread_num], &res);
 if (res != 0)
@@ -313,18 +315,18 @@ reap_child(bool wait_for_child)
 /* dispose of handle to stop leaks */
 CloseHandle(thread_handles[thread_num]);

-/* Move last slot into dead child's position */
+/* Move last slot into dead child's position */
 if (thread_num != parallel_jobs - 1)
 {
-void *tmp_args;
-
+void *tmp_args;
+
 thread_handles[thread_num] = thread_handles[parallel_jobs - 1];

 /*
-* We must swap the arg struct pointers because the thread we
-* just moved is active, and we must make sure it is not
-* reused by the next created thread. Instead, the new thread
-* will use the arg struct of the thread that just died.
+* We must swap the arg struct pointers because the thread we just
+* moved is active, and we must make sure it is not reused by the next
+* created thread. Instead, the new thread will use the arg struct of
+* the thread that just died.
 */
 tmp_args = cur_thread_args[thread_num];
 cur_thread_args[thread_num] = cur_thread_args[parallel_jobs - 1];
@@ -134,7 +134,7 @@ main(int argc, char **argv)
 disable_old_cluster();

 transfer_all_new_tablespaces(&old_cluster.dbarr, &new_cluster.dbarr,
-old_cluster.pgdata, new_cluster.pgdata);
+old_cluster.pgdata, new_cluster.pgdata);

 /*
 * Assuming OIDs are only used in system tables, there is no need to
@@ -193,14 +193,13 @@ setup(char *argv0, bool *live_check)
 if (pid_lock_file_exists(old_cluster.pgdata))
 {
 /*
-* If we have a postmaster.pid file, try to start the server. If
-* it starts, the pid file was stale, so stop the server. If it
-* doesn't start, assume the server is running. If the pid file
-* is left over from a server crash, this also allows any committed
-* transactions stored in the WAL to be replayed so they are not
-* lost, because WAL files are not transfered from old to new
-* servers.
-*/
+* If we have a postmaster.pid file, try to start the server. If it
+* starts, the pid file was stale, so stop the server. If it doesn't
+* start, assume the server is running. If the pid file is left over
+* from a server crash, this also allows any committed transactions
+* stored in the WAL to be replayed so they are not lost, because WAL
+* files are not transfered from old to new servers.
+*/
 if (start_postmaster(&old_cluster, false))
 stop_postmaster(false);
 else
@@ -220,7 +219,7 @@ setup(char *argv0, bool *live_check)
 stop_postmaster(false);
 else
 pg_log(PG_FATAL, "There seems to be a postmaster servicing the new cluster.\n"
-"Please shutdown that postmaster and try again.\n");
+"Please shutdown that postmaster and try again.\n");
 }

 /* get path to pg_upgrade executable */
@@ -312,9 +311,9 @@ create_new_objects(void)
 prep_status("Adding support functions to new cluster");

 /*
-* Technically, we only need to install these support functions in new
-* databases that also exist in the old cluster, but for completeness
-* we process all new databases.
+* Technically, we only need to install these support functions in new
+* databases that also exist in the old cluster, but for completeness we
+* process all new databases.
 */
 for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
 {
@@ -330,21 +329,22 @@ create_new_objects(void)

 for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
 {
-char sql_file_name[MAXPGPATH], log_file_name[MAXPGPATH];
-DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum];
+char sql_file_name[MAXPGPATH],
+log_file_name[MAXPGPATH];
+DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum];

 pg_log(PG_STATUS, "%s", old_db->db_name);
 snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
 snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);

 /*
-* pg_dump only produces its output at the end, so there is little
-* parallelism if using the pipe.
+* pg_dump only produces its output at the end, so there is little
+* parallelism if using the pipe.
 */
 parallel_exec_prog(log_file_name, NULL,
-"\"%s/pg_restore\" %s --exit-on-error --verbose --dbname \"%s\" \"%s\"",
-new_cluster.bindir, cluster_conn_opts(&new_cluster),
-old_db->db_name, sql_file_name);
+"\"%s/pg_restore\" %s --exit-on-error --verbose --dbname \"%s\" \"%s\"",
+new_cluster.bindir, cluster_conn_opts(&new_cluster),
+old_db->db_name, sql_file_name);
 }

 /* reap all children */
@@ -418,6 +418,7 @@ copy_clog_xlog_xid(void)
 copy_subdir_files("pg_multixact/offsets");
 copy_subdir_files("pg_multixact/members");
 prep_status("Setting next multixact ID and offset for new cluster");

 /*
 * we preserve all files and contents, so we must preserve both "next"
 * counters here and the oldest multi present on system.
@@ -434,6 +435,7 @@ copy_clog_xlog_xid(void)
 else if (new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER)
 {
 prep_status("Setting oldest multixact ID on new cluster");

 /*
 * We don't preserve files in this case, but it's important that the
 * oldest multi is set to the latest value used by the old system, so
@@ -549,7 +551,6 @@ set_frozenxids(void)
 static void
 cleanup(void)
 {

 fclose(log_opts.internal);

 /* Remove dump and log files? */
@@ -567,8 +568,9 @@ cleanup(void)
 if (old_cluster.dbarr.dbs)
 for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
 {
-char sql_file_name[MAXPGPATH], log_file_name[MAXPGPATH];
-DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum];
+char sql_file_name[MAXPGPATH],
+log_file_name[MAXPGPATH];
+DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum];

 snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
 unlink(sql_file_name);
@@ -73,24 +73,24 @@ extern char *output_files[];
 #define pg_copy_file copy_file
 #define pg_mv_file rename
 #define pg_link_file link
-#define PATH_SEPARATOR '/'
+#define PATH_SEPARATOR '/'
 #define RM_CMD "rm -f"
 #define RMDIR_CMD "rm -rf"
 #define SCRIPT_EXT "sh"
 #define ECHO_QUOTE "'"
-#define ECHO_BLANK ""
+#define ECHO_BLANK ""
 #else
 #define pg_copy_file CopyFile
 #define pg_mv_file pgrename
 #define pg_link_file win32_pghardlink
 #define sleep(x) Sleep(x * 1000)
-#define PATH_SEPARATOR '\\'
+#define PATH_SEPARATOR '\\'
 #define RM_CMD "DEL /q"
 #define RMDIR_CMD "RMDIR /s/q"
 #define SCRIPT_EXT "bat"
 #define EXE_EXT ".exe"
 #define ECHO_QUOTE ""
-#define ECHO_BLANK "."
+#define ECHO_BLANK "."
 #endif

 #define CLUSTER_NAME(cluster) ((cluster) == &old_cluster ? "old" : \
@@ -122,8 +122,8 @@ extern char *output_files[];
 typedef struct
 {
 /* Can't use NAMEDATALEN; not guaranteed to fit on client */
-char *nspname; /* namespace name */
-char *relname; /* relation name */
+char *nspname; /* namespace name */
+char *relname; /* relation name */
 Oid reloid; /* relation oid */
 Oid relfilenode; /* relation relfile node */
 /* relation tablespace path, or "" for the cluster default */
@@ -155,8 +155,8 @@ typedef struct
 Oid old_relfilenode;
 Oid new_relfilenode;
 /* the rest are used only for logging and error reporting */
-char *nspname; /* namespaces */
-char *relname;
+char *nspname; /* namespaces */
+char *relname;
 } FileNameMap;

 /*
@@ -165,7 +165,7 @@ typedef struct
 typedef struct
 {
 Oid db_oid; /* oid of the database */
-char *db_name; /* database name */
+char *db_name; /* database name */
 char db_tblspace[MAXPGPATH]; /* database default tablespace path */
 RelInfoArr rel_arr; /* array of all user relinfos */
 } DbInfo;
@@ -254,8 +254,8 @@ typedef struct
 char major_version_str[64]; /* string PG_VERSION of cluster */
 uint32 bin_version; /* version returned from pg_ctl */
 Oid pg_database_oid; /* OID of pg_database relation */
-Oid install_role_oid; /* OID of connected role */
-Oid role_count; /* number of roles defined in the cluster */
+Oid install_role_oid; /* OID of connected role */
+Oid role_count; /* number of roles defined in the cluster */
 char *tablespace_suffix; /* directory specification */
 } ClusterInfo;

@@ -312,12 +312,12 @@ extern OSInfo os_info;
 /* check.c */

 void output_check_banner(bool live_check);
-void check_and_dump_old_cluster(bool live_check,
-char **sequence_script_file_name);
+void check_and_dump_old_cluster(bool live_check,
+char **sequence_script_file_name);
 void check_new_cluster(void);
 void report_clusters_compatible(void);
 void issue_warnings(char *sequence_script_file_name);
-void output_completion_banner(char *analyze_script_file_name,
+void output_completion_banner(char *analyze_script_file_name,
 char *deletion_script_file_name);
 void check_cluster_versions(void);
 void check_cluster_compatibility(bool live_check);
@@ -413,11 +413,11 @@ void get_sock_dir(ClusterInfo *cluster, bool live_check);
 /* relfilenode.c */

 void get_pg_database_relfilenode(ClusterInfo *cluster);
-void transfer_all_new_tablespaces(DbInfoArr *old_db_arr,
-DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata);
-void transfer_all_new_dbs(DbInfoArr *old_db_arr,
+void transfer_all_new_tablespaces(DbInfoArr *old_db_arr,
+DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata);
+void transfer_all_new_dbs(DbInfoArr *old_db_arr,
 DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata,
-char *old_tablespace);
+char *old_tablespace);

 /* tablespace.c */

@@ -477,11 +477,11 @@ void old_8_3_invalidate_bpchar_pattern_ops_indexes(ClusterInfo *cluster,
 char *old_8_3_create_sequence_script(ClusterInfo *cluster);

 /* parallel.c */
-void parallel_exec_prog(const char *log_file, const char *opt_log_file,
-const char *fmt,...)
+void
+parallel_exec_prog(const char *log_file, const char *opt_log_file,
+const char *fmt,...)
 __attribute__((format(PG_PRINTF_ATTRIBUTE, 3, 4)));
-void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
-char *old_pgdata, char *new_pgdata,
-char *old_tablespace);
+void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
+char *old_pgdata, char *new_pgdata,
+char *old_tablespace);
 bool reap_child(bool wait_for_child);

@@ -18,7 +18,7 @@
 static void transfer_single_new_db(pageCnvCtx *pageConverter,
 FileNameMap *maps, int size, char *old_tablespace);
 static void transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
-const char *suffix);
+const char *suffix);

 /*
@@ -29,32 +29,32 @@ static void transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
 */
 void
 transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
-char *old_pgdata, char *new_pgdata)
+char *old_pgdata, char *new_pgdata)
 {
 pg_log(PG_REPORT, "%s user relation files\n",
 user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying");

 /*
-* Transfering files by tablespace is tricky because a single database
-* can use multiple tablespaces. For non-parallel mode, we just pass a
-* NULL tablespace path, which matches all tablespaces. In parallel mode,
-* we pass the default tablespace and all user-created tablespaces
-* and let those operations happen in parallel.
+* Transfering files by tablespace is tricky because a single database can
+* use multiple tablespaces. For non-parallel mode, we just pass a NULL
+* tablespace path, which matches all tablespaces. In parallel mode, we
+* pass the default tablespace and all user-created tablespaces and let
+* those operations happen in parallel.
 */
 if (user_opts.jobs <= 1)
 parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
 new_pgdata, NULL);
 else
 {
-int tblnum;
+int tblnum;

 /* transfer default tablespace */
 parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
-new_pgdata, old_pgdata);
+new_pgdata, old_pgdata);

 for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
 parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
-new_pgdata, os_info.old_tablespaces[tblnum]);
+new_pgdata, os_info.old_tablespaces[tblnum]);
 /* reap all children */
 while (reap_child(true) == true)
 ;
@@ -75,7 +75,7 @@ transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
 */
 void
 transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
-char *old_pgdata, char *new_pgdata, char *old_tablespace)
+char *old_pgdata, char *new_pgdata, char *old_tablespace)
 {
 int old_dbnum,
 new_dbnum;
@@ -170,11 +170,11 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
 {
 int mapnum;
 bool vm_crashsafe_match = true;

 /*
 * Do the old and new cluster disagree on the crash-safetiness of the vm
-* files? If so, do not copy them.
-*/
+* files? If so, do not copy them.
+*/
 if (old_cluster.controldata.cat_ver < VISIBILITY_MAP_CRASHSAFE_CAT_VER &&
 new_cluster.controldata.cat_ver >= VISIBILITY_MAP_CRASHSAFE_CAT_VER)
 vm_crashsafe_match = false;
@@ -186,7 +186,7 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
 {
 /* transfer primary file */
 transfer_relfile(pageConverter, &maps[mapnum], "");

 /* fsm/vm files added in PG 8.4 */
 if (GET_MAJOR_VERSION(old_cluster.major_version) >= 804)
 {
@@ -217,13 +217,11 @@ transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
 int fd;
 int segno;
 char extent_suffix[65];

 /*
-* Now copy/link any related segments as well. Remember, PG breaks
-* large files into 1GB segments, the first segment has no extension,
-* subsequent segments are named relfilenode.1, relfilenode.2,
-* relfilenode.3.
-* copied.
+* Now copy/link any related segments as well. Remember, PG breaks large
+* files into 1GB segments, the first segment has no extension, subsequent
+* segments are named relfilenode.1, relfilenode.2, relfilenode.3. copied.
 */
 for (segno = 0;; segno++)
 {
@@ -233,12 +231,12 @@ transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
 snprintf(extent_suffix, sizeof(extent_suffix), ".%d", segno);

 snprintf(old_file, sizeof(old_file), "%s%s/%u/%u%s%s", map->old_tablespace,
-map->old_tablespace_suffix, map->old_db_oid, map->old_relfilenode,
+map->old_tablespace_suffix, map->old_db_oid, map->old_relfilenode,
 type_suffix, extent_suffix);
 snprintf(new_file, sizeof(new_file), "%s%s/%u/%u%s%s", map->new_tablespace,
-map->new_tablespace_suffix, map->new_db_oid, map->new_relfilenode,
+map->new_tablespace_suffix, map->new_db_oid, map->new_relfilenode,
 type_suffix, extent_suffix);

 /* Is it an extent, fsm, or vm file? */
 if (type_suffix[0] != '\0' || segno != 0)
 {
@@ -257,18 +255,18 @@ transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
 }

 unlink(new_file);

 /* Copying files might take some time, so give feedback. */
 pg_log(PG_STATUS, "%s", old_file);

 if ((user_opts.transfer_mode == TRANSFER_MODE_LINK) && (pageConverter != NULL))
 pg_log(PG_FATAL, "This upgrade requires page-by-page conversion, "
 "you must use copy mode instead of link mode.\n");

 if (user_opts.transfer_mode == TRANSFER_MODE_COPY)
 {
 pg_log(PG_VERBOSE, "copying \"%s\" to \"%s\"\n", old_file, new_file);

 if ((msg = copyAndUpdateFile(pageConverter, old_file, new_file, true)) != NULL)
 pg_log(PG_FATAL, "error while copying relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
 map->nspname, map->relname, old_file, new_file, msg);
@@ -276,14 +274,13 @@ transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
 else
 {
 pg_log(PG_VERBOSE, "linking \"%s\" to \"%s\"\n", old_file, new_file);

 if ((msg = linkAndUpdateFile(pageConverter, old_file, new_file)) != NULL)
 pg_log(PG_FATAL,
 "error while creating link for relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
 map->nspname, map->relname, old_file, new_file, msg);
 }
 }
 }

 return;
 }

@@ -79,7 +79,7 @@ get_db_conn(ClusterInfo *cluster, const char *db_name)
 char *
 cluster_conn_opts(ClusterInfo *cluster)
 {
-static char conn_opts[MAXPGPATH + NAMEDATALEN + 100];
+static char conn_opts[MAXPGPATH + NAMEDATALEN + 100];

 if (cluster->sockdir)
 snprintf(conn_opts, sizeof(conn_opts),
@@ -192,7 +192,7 @@ start_postmaster(ClusterInfo *cluster, bool throw_error)
 strcat(socket_string,
 " -c listen_addresses='' -c unix_socket_permissions=0700");

-/* Have a sockdir? Tell the postmaster. */
+/* Have a sockdir? Tell the postmaster. */
 if (cluster->sockdir)
 snprintf(socket_string + strlen(socket_string),
 sizeof(socket_string) - strlen(socket_string),
@@ -215,13 +215,13 @@ start_postmaster(ClusterInfo *cluster, bool throw_error)
 * win on ext4.
 */
 snprintf(cmd, sizeof(cmd),
-"\"%s/pg_ctl\" -w -l \"%s\" -D \"%s\" -o \"-p %d%s%s %s%s\" start",
+"\"%s/pg_ctl\" -w -l \"%s\" -D \"%s\" -o \"-p %d%s%s %s%s\" start",
 cluster->bindir, SERVER_LOG_FILE, cluster->pgconfig, cluster->port,
 (cluster->controldata.cat_ver >=
 BINARY_UPGRADE_SERVER_FLAG_CAT_VER) ? " -b" :
 " -c autovacuum=off -c autovacuum_freeze_max_age=2000000000",
 (cluster == &new_cluster) ?
-" -c synchronous_commit=off -c fsync=off -c full_page_writes=off" : "",
+" -c synchronous_commit=off -c fsync=off -c full_page_writes=off" : "",
 cluster->pgopts ? cluster->pgopts : "", socket_string);

 /*
@@ -229,7 +229,7 @@ start_postmaster(ClusterInfo *cluster, bool throw_error)
 * it might supply a reason for the failure.
 */
 pg_ctl_return = exec_prog(SERVER_START_LOG_FILE,
-/* pass both file names if they differ */
+/* pass both file names if they differ */
 (strcmp(SERVER_LOG_FILE,
 SERVER_START_LOG_FILE) != 0) ?
 SERVER_LOG_FILE : NULL,
@@ -238,7 +238,7 @@ start_postmaster(ClusterInfo *cluster, bool throw_error)

 if (!pg_ctl_return && !throw_error)
 return false;

 /* Check to see if we can connect to the server; if not, report it. */
 if ((conn = get_db_conn(cluster, "template1")) == NULL ||
 PQstatus(conn) != CONNECTION_OK)
@@ -59,7 +59,7 @@ get_tablespace_paths(void)

 if ((os_info.num_old_tablespaces = PQntuples(res)) != 0)
 os_info.old_tablespaces = (char **) pg_malloc(
-os_info.num_old_tablespaces * sizeof(char *));
+os_info.num_old_tablespaces * sizeof(char *));
 else
 os_info.old_tablespaces = NULL;

@@ -40,8 +40,8 @@ void
 end_progress_output(void)
 {
 /*
-* In case nothing printed; pass a space so gcc doesn't complain about
-* empty format string.
+* In case nothing printed; pass a space so gcc doesn't complain about
+* empty format string.
 */
 prep_status(" ");
 }
@@ -114,13 +114,13 @@ pg_log(eLogType type, char *fmt,...)
 /* for output to a display, do leading truncation and append \r */
 if (isatty(fileno(stdout)))
 /* -2 because we use a 2-space indent */
-printf(" %s%-*.*s\r",
-/* prefix with "..." if we do leading truncation */
-strlen(message) <= MESSAGE_WIDTH - 2 ? "" : "...",
-MESSAGE_WIDTH - 2, MESSAGE_WIDTH - 2,
-/* optional leading truncation */
-strlen(message) <= MESSAGE_WIDTH - 2 ? message :
-message + strlen(message) - MESSAGE_WIDTH + 3 + 2);
+printf(" %s%-*.*s\r",
+/* prefix with "..." if we do leading truncation */
+strlen(message) <= MESSAGE_WIDTH - 2 ? "" : "...",
+MESSAGE_WIDTH - 2, MESSAGE_WIDTH - 2,
+/* optional leading truncation */
+strlen(message) <= MESSAGE_WIDTH - 2 ? message :
+message + strlen(message) - MESSAGE_WIDTH + 3 + 2);
 else
 printf(" %s\n", _(message));
 break;