diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml
index 08cad68199f..11582dd1c82 100644
--- a/doc/src/sgml/ref/pg_dump.sgml
+++ b/doc/src/sgml/ref/pg_dump.sgml
@@ -46,9 +46,10 @@ PostgreSQL documentation
- pg_dump only dumps a single database. To backup
- global objects that are common to all databases in a cluster, such as roles
- and tablespaces, use pg_dumpall.
+ pg_dump only dumps a single database.
+ To back up an entire cluster, or to back up global objects that are
+ common to all databases in a cluster (such as roles and tablespaces),
+ use pg_dumpall.
@@ -142,7 +143,8 @@ PostgreSQL documentation
switch is therefore only useful to add large objects to dumps
where a specific schema or table has been requested. Note that
blobs are considered data and therefore will be included when
- --data-only is used, but not when --schema-only is.
+ --data-only is used, but not
+ when --schema-only is.
@@ -196,6 +198,17 @@ PostgreSQL documentation
recreates the target database before reconnecting to it.
+
+ With --create, the output also includes the
+ database's comment if any, and any configuration variable settings
+ that are specific to this database, that is,
+ any ALTER DATABASE ... SET ...
+ and ALTER ROLE ... IN DATABASE ... SET ...
+ commands that mention this database.
+ Access privileges for the database itself are also dumped,
+ unless --no-acl is specified.
+
+
This option is only meaningful for the plain-text format. For
the archive formats, you can specify the option when you
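   As a rough illustration of the additional database-level output described
   above (hypothetical database "mydb", owner "alice", and role "bob"; the exact
   commands, ordering, and quoting are whatever pg_dump emits), the extra
   commands issued after the CREATE DATABASE and the reconnect might look like:

       COMMENT ON DATABASE mydb IS 'reporting database';
       GRANT CONNECT ON DATABASE mydb TO bob;
       ALTER DATABASE mydb SET work_mem TO '64MB';
       ALTER ROLE bob IN DATABASE mydb SET search_path TO reports, public;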
@@ -1231,10 +1244,6 @@ CREATE DATABASE foo WITH TEMPLATE template0;
ANALYZE after restoring from a dump file
to ensure optimal performance; see
and for more information.
- The dump file also does not
- contain any ALTER DATABASE ... SET commands;
- these settings are dumped by pg_dumpall,
- along with database users and other installation-wide settings.
@@ -1325,6 +1334,15 @@ CREATE DATABASE foo WITH TEMPLATE template0;
+
+ To reload an archive file into the same database it was dumped from,
+ discarding the current contents of that database:
+
+
+$ pg_restore -d postgres --clean --create db.dump
+
+
+
To dump a single table named mytab:
diff --git a/doc/src/sgml/ref/pg_dumpall.sgml b/doc/src/sgml/ref/pg_dumpall.sgml
index 5196a211b1e..4a639f2d41e 100644
--- a/doc/src/sgml/ref/pg_dumpall.sgml
+++ b/doc/src/sgml/ref/pg_dumpall.sgml
@@ -36,13 +36,10 @@ PostgreSQL documentation
of a cluster into one script file. The script file contains
SQL commands that can be used as input to psql to restore the databases. It does this by
- calling pg_dump for each database in a cluster.
+ calling pg_dump for each database in the cluster.
pg_dumpall also dumps global objects
- that are common to all databases.
+ that are common to all databases, that is, database roles and tablespaces.
(pg_dump does not save these objects.)
- This currently includes information about database users and
- groups, tablespaces, and properties such as access permissions
- that apply to databases as a whole.
@@ -50,7 +47,7 @@ PostgreSQL documentation
databases you will most likely have to connect as a database
superuser in order to produce a complete dump. Also you will need
superuser privileges to execute the saved script in order to be
- allowed to add users and groups, and to create databases.
+ allowed to add roles and create databases.
@@ -308,7 +305,7 @@ PostgreSQL documentation
Use conditional commands (i.e. add an IF EXISTS
- clause) to clean databases and other objects. This option is not valid
+ clause) to drop databases and other objects. This option is not valid
unless --clean is also specified.
@@ -500,10 +497,11 @@ PostgreSQL documentation
The option is called --dbname for consistency with other
client applications, but because pg_dumpall
- needs to connect to many databases, database name in the connection
- string will be ignored. Use -l option to specify
- the name of the database used to dump global objects and to discover
- what other databases should be dumped.
+ needs to connect to many databases, the database name in the
+ connection string will be ignored. Use the -l
+ option to specify the name of the database used for the initial
+ connection, which will dump global objects and discover what other
+ databases should be dumped.
@@ -657,6 +655,17 @@ PostgreSQL documentation
messages will refer to pg_dump.
+
+ The --clean option can be useful even when your
+ intention is to restore the dump script into a fresh cluster. Use of
+ --clean authorizes the script to drop and re-create the
+ built-in postgres and template1
+ databases, ensuring that those databases will retain the same properties
+ (for instance, locale and encoding) that they had in the source cluster.
+ Without the option, those databases will retain their existing
+ database-level properties, as well as any pre-existing contents.
+
+
Once restored, it is wise to run ANALYZE on each
database so the optimizer has useful statistics. You
@@ -664,6 +673,18 @@ PostgreSQL documentation
databases.
+
+ The dump script should not be expected to run completely without errors.
+ In particular, because the script will issue CREATE ROLE
+ for every role existing in the source cluster, it is certain to get a
+ role already exists error for the bootstrap superuser,
+ unless the destination cluster was initialized with a different bootstrap
+ superuser name. This error is harmless and should be ignored. Use of
+ the --clean option is likely to produce additional
+ harmless error messages about non-existent objects, although you can
+ minimize those by adding --if-exists.
+
+
pg_dumpall requires all needed
tablespace directories to exist before the restore; otherwise,
@@ -688,10 +709,13 @@ PostgreSQL documentation
$ psql -f db.out postgres
- (It is not important to which database you connect here since the
+ It is not important to which database you connect here since the
script file created by pg_dumpall will
contain the appropriate commands to create and connect to the saved
- databases.)
+ databases. An exception is that if you specified --clean,
+ you must connect to the postgres database initially;
+ the script will attempt to drop other databases immediately, and that
+ will fail for the database you are connected to.
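   For illustration (hypothetical cluster; exact contents vary), a script
   produced with --clean begins its per-database section roughly like this,
   which is why it must not be run while connected to template1:

       DROP DATABASE template1;
       CREATE DATABASE template1 WITH TEMPLATE = template0;
       \connect template1
       -- ... contents of template1, then postgres, then the other databases ...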
diff --git a/doc/src/sgml/ref/pg_restore.sgml b/doc/src/sgml/ref/pg_restore.sgml
index 9946b94e84d..a2ebf75ebb5 100644
--- a/doc/src/sgml/ref/pg_restore.sgml
+++ b/doc/src/sgml/ref/pg_restore.sgml
@@ -126,6 +126,17 @@
recreate the target database before connecting to it.
+
+ With --create, pg_restore
+ also restores the database's comment if any, and any configuration
+ variable settings that are specific to this database, that is,
+ any ALTER DATABASE ... SET ...
+ and ALTER ROLE ... IN DATABASE ... SET ...
+ commands that mention this database.
+ Access privileges for the database itself are also restored,
+ unless --no-acl is specified.
+
+
When this option is used, the database named with -d
is used only to issue the initial DROP DATABASE and
diff --git a/src/bin/pg_dump/dumputils.c b/src/bin/pg_dump/dumputils.c
index 32ad600fd08..7afddc31533 100644
--- a/src/bin/pg_dump/dumputils.c
+++ b/src/bin/pg_dump/dumputils.c
@@ -807,3 +807,54 @@ buildACLQueries(PQExpBuffer acl_subquery, PQExpBuffer racl_subquery,
printfPQExpBuffer(init_racl_subquery, "NULL");
}
}
+
+/*
+ * Helper function for dumping "ALTER DATABASE/ROLE SET ..." commands.
+ *
+ * Parse the contents of configitem (a "name=value" string), wrap it in
+ * a complete ALTER command, and append it to buf.
+ *
+ * type is DATABASE or ROLE, and name is the name of the database or role.
+ * If we need an "IN" clause, type2 and name2 similarly define what to put
+ * there; otherwise they should be NULL.
+ * conn is used only to determine string-literal quoting conventions.
+ */
+void
+makeAlterConfigCommand(PGconn *conn, const char *configitem,
+ const char *type, const char *name,
+ const char *type2, const char *name2,
+ PQExpBuffer buf)
+{
+ char *mine;
+ char *pos;
+
+ /* Parse the configitem. If we can't find an "=", silently do nothing. */
+ mine = pg_strdup(configitem);
+ pos = strchr(mine, '=');
+ if (pos == NULL)
+ {
+ pg_free(mine);
+ return;
+ }
+ *pos++ = '\0';
+
+ /* Build the command, with suitable quoting for everything. */
+ appendPQExpBuffer(buf, "ALTER %s %s ", type, fmtId(name));
+ if (type2 != NULL && name2 != NULL)
+ appendPQExpBuffer(buf, "IN %s %s ", type2, fmtId(name2));
+ appendPQExpBuffer(buf, "SET %s TO ", fmtId(mine));
+
+ /*
+ * Some GUC variable names are 'LIST' type and hence must not be quoted.
+ * XXX this list is incomplete ...
+ */
+ if (pg_strcasecmp(mine, "DateStyle") == 0
+ || pg_strcasecmp(mine, "search_path") == 0)
+ appendPQExpBufferStr(buf, pos);
+ else
+ appendStringLiteralConn(buf, pos, conn);
+
+ appendPQExpBufferStr(buf, ";\n");
+
+ pg_free(mine);
+}
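A usage sketch for the relocated helper (hypothetical names): given configitem
"search_path=\"$user\", public" with type/name ROLE/bob and type2/name2
DATABASE/mydb, the command appended to buf is, roughly,

    ALTER ROLE bob IN DATABASE mydb SET search_path TO "$user", public;

with the value left unquoted because search_path is on the LIST-type exception
list, whereas an ordinary setting such as "work_mem=64MB" for a database comes
out string-quoted:

    ALTER DATABASE mydb SET work_mem TO '64MB';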
diff --git a/src/bin/pg_dump/dumputils.h b/src/bin/pg_dump/dumputils.h
index d5f150dfa06..23a0645be8a 100644
--- a/src/bin/pg_dump/dumputils.h
+++ b/src/bin/pg_dump/dumputils.h
@@ -56,4 +56,9 @@ extern void buildACLQueries(PQExpBuffer acl_subquery, PQExpBuffer racl_subquery,
const char *acl_column, const char *acl_owner,
const char *obj_kind, bool binary_upgrade);
+extern void makeAlterConfigCommand(PGconn *conn, const char *configitem,
+ const char *type, const char *name,
+ const char *type2, const char *name2,
+ PQExpBuffer buf);
+
#endif /* DUMPUTILS_H */
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index acef20fdf72..ab009e6fe36 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -489,16 +489,19 @@ RestoreArchive(Archive *AHX)
* whole. Issuing drops against anything else would be wrong,
* because at this point we're connected to the wrong database.
* Conversely, if we're not in createDB mode, we'd better not
- * issue a DROP against the database at all.
+ * issue a DROP against the database at all. (The DATABASE
+ * PROPERTIES entry, if any, works like the DATABASE entry.)
*/
if (ropt->createDB)
{
- if (strcmp(te->desc, "DATABASE") != 0)
+ if (strcmp(te->desc, "DATABASE") != 0 &&
+ strcmp(te->desc, "DATABASE PROPERTIES") != 0)
continue;
}
else
{
- if (strcmp(te->desc, "DATABASE") == 0)
+ if (strcmp(te->desc, "DATABASE") == 0 ||
+ strcmp(te->desc, "DATABASE PROPERTIES") == 0)
continue;
}
@@ -558,6 +561,8 @@ RestoreArchive(Archive *AHX)
* we simply emit the original command for DEFAULT
* objects (modulo the adjustment made above).
*
+ * Likewise, don't mess with DATABASE PROPERTIES.
+ *
* If we used CREATE OR REPLACE VIEW as a means of
* quasi-dropping an ON SELECT rule, that should
* be emitted unchanged as well.
@@ -570,6 +575,7 @@ RestoreArchive(Archive *AHX)
* search for hardcoded "DROP CONSTRAINT" instead.
*/
if (strcmp(te->desc, "DEFAULT") == 0 ||
+ strcmp(te->desc, "DATABASE PROPERTIES") == 0 ||
strncmp(dropStmt, "CREATE OR REPLACE VIEW", 22) == 0)
appendPQExpBufferStr(ftStmt, dropStmt);
else
@@ -750,11 +756,19 @@ restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel)
reqs = te->reqs;
/*
- * Ignore DATABASE entry unless we should create it. We must check this
- * here, not in _tocEntryRequired, because the createDB option should not
- * affect emitting a DATABASE entry to an archive file.
+ * Ignore DATABASE and related entries unless createDB is specified. We
+ * must check this here, not in _tocEntryRequired, because !createDB
+ * should not prevent emitting these entries to an archive file.
*/
- if (!ropt->createDB && strcmp(te->desc, "DATABASE") == 0)
+ if (!ropt->createDB &&
+ (strcmp(te->desc, "DATABASE") == 0 ||
+ strcmp(te->desc, "DATABASE PROPERTIES") == 0 ||
+ (strcmp(te->desc, "ACL") == 0 &&
+ strncmp(te->tag, "DATABASE ", 9) == 0) ||
+ (strcmp(te->desc, "COMMENT") == 0 &&
+ strncmp(te->tag, "DATABASE ", 9) == 0) ||
+ (strcmp(te->desc, "SECURITY LABEL") == 0 &&
+ strncmp(te->tag, "DATABASE ", 9) == 0)))
reqs = 0;
/* Dump any relevant dump warnings to stderr */
@@ -2917,8 +2931,8 @@ _tocEntryRequired(TocEntry *te, teSection curSection, RestoreOptions *ropt)
* Special Case: If 'SEQUENCE SET' or anything to do with BLOBs, then
* it is considered a data entry. We don't need to check for the
* BLOBS entry or old-style BLOB COMMENTS, because they will have
- * hadDumper = true ... but we do need to check new-style BLOB
- * comments.
+ * hadDumper = true ... but we do need to check new-style BLOB ACLs,
+ * comments, etc.
*/
if (strcmp(te->desc, "SEQUENCE SET") == 0 ||
strcmp(te->desc, "BLOB") == 0 ||
@@ -3598,6 +3612,7 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData)
else if (strcmp(te->desc, "CAST") == 0 ||
strcmp(te->desc, "CHECK CONSTRAINT") == 0 ||
strcmp(te->desc, "CONSTRAINT") == 0 ||
+ strcmp(te->desc, "DATABASE PROPERTIES") == 0 ||
strcmp(te->desc, "DEFAULT") == 0 ||
strcmp(te->desc, "FK CONSTRAINT") == 0 ||
strcmp(te->desc, "INDEX") == 0 ||
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 1dc1d80ab13..11e1ba04cc4 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -252,6 +252,8 @@ static void dumpPublication(Archive *fout, PublicationInfo *pubinfo);
static void dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo);
static void dumpSubscription(Archive *fout, SubscriptionInfo *subinfo);
static void dumpDatabase(Archive *AH);
+static void dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
+ const char *dbname, Oid dboid);
static void dumpEncoding(Archive *AH);
static void dumpStdStrings(Archive *AH);
static void binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
@@ -838,7 +840,7 @@ main(int argc, char **argv)
dumpEncoding(fout);
dumpStdStrings(fout);
- /* The database item is always next, unless we don't want it at all */
+ /* The database items are always next, unless we don't want them at all */
if (dopt.include_everything && !dopt.dataOnly)
dumpDatabase(fout);
@@ -2540,6 +2542,10 @@ dumpDatabase(Archive *fout)
i_ctype,
i_frozenxid,
i_minmxid,
+ i_datacl,
+ i_rdatacl,
+ i_datistemplate,
+ i_datconnlimit,
i_tablespace;
CatalogId dbCatId;
DumpId dbDumpId;
@@ -2548,11 +2554,17 @@ dumpDatabase(Archive *fout)
*encoding,
*collate,
*ctype,
+ *datacl,
+ *rdatacl,
+ *datistemplate,
+ *datconnlimit,
*tablespace;
uint32 frozenxid,
minmxid;
+ char *qdatname;
datname = PQdb(conn);
+ qdatname = pg_strdup(fmtId(datname));
if (g_verbose)
write_msg(NULL, "saving database definition\n");
@@ -2560,13 +2572,37 @@ dumpDatabase(Archive *fout)
/* Make sure we are in proper schema */
selectSourceSchema(fout, "pg_catalog");
- /* Get the database owner and parameters from pg_database */
- if (fout->remoteVersion >= 90300)
+ /* Fetch the database-level properties for this database */
+ if (fout->remoteVersion >= 90600)
{
appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
"(%s datdba) AS dba, "
"pg_encoding_to_char(encoding) AS encoding, "
"datcollate, datctype, datfrozenxid, datminmxid, "
+ "(SELECT array_agg(acl ORDER BY acl::text COLLATE \"C\") FROM ( "
+ " SELECT unnest(coalesce(datacl,acldefault('d',datdba))) AS acl "
+ " EXCEPT SELECT unnest(acldefault('d',datdba))) as datacls)"
+ " AS datacl, "
+ "(SELECT array_agg(acl ORDER BY acl::text COLLATE \"C\") FROM ( "
+ " SELECT unnest(acldefault('d',datdba)) AS acl "
+ " EXCEPT SELECT unnest(coalesce(datacl,acldefault('d',datdba)))) as rdatacls)"
+ " AS rdatacl, "
+ "datistemplate, datconnlimit, "
+ "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
+ "shobj_description(oid, 'pg_database') AS description "
+
+ "FROM pg_database "
+ "WHERE datname = ",
+ username_subquery);
+ appendStringLiteralAH(dbQry, datname, fout);
+ }
+ else if (fout->remoteVersion >= 90300)
+ {
+ appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
+ "(%s datdba) AS dba, "
+ "pg_encoding_to_char(encoding) AS encoding, "
+ "datcollate, datctype, datfrozenxid, datminmxid, "
+ "datacl, '' as rdatacl, datistemplate, datconnlimit, "
"(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
"shobj_description(oid, 'pg_database') AS description "
@@ -2581,6 +2617,7 @@ dumpDatabase(Archive *fout)
"(%s datdba) AS dba, "
"pg_encoding_to_char(encoding) AS encoding, "
"datcollate, datctype, datfrozenxid, 0 AS datminmxid, "
+ "datacl, '' as rdatacl, datistemplate, datconnlimit, "
"(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
"shobj_description(oid, 'pg_database') AS description "
@@ -2595,6 +2632,7 @@ dumpDatabase(Archive *fout)
"(%s datdba) AS dba, "
"pg_encoding_to_char(encoding) AS encoding, "
"NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
+ "datacl, '' as rdatacl, datistemplate, datconnlimit, "
"(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
"shobj_description(oid, 'pg_database') AS description "
@@ -2609,6 +2647,8 @@ dumpDatabase(Archive *fout)
"(%s datdba) AS dba, "
"pg_encoding_to_char(encoding) AS encoding, "
"NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
+ "datacl, '' as rdatacl, datistemplate, "
+ "-1 as datconnlimit, "
"(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace "
"FROM pg_database "
"WHERE datname = ",
@@ -2626,6 +2666,10 @@ dumpDatabase(Archive *fout)
i_ctype = PQfnumber(res, "datctype");
i_frozenxid = PQfnumber(res, "datfrozenxid");
i_minmxid = PQfnumber(res, "datminmxid");
+ i_datacl = PQfnumber(res, "datacl");
+ i_rdatacl = PQfnumber(res, "rdatacl");
+ i_datistemplate = PQfnumber(res, "datistemplate");
+ i_datconnlimit = PQfnumber(res, "datconnlimit");
i_tablespace = PQfnumber(res, "tablespace");
dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
@@ -2636,10 +2680,20 @@ dumpDatabase(Archive *fout)
ctype = PQgetvalue(res, 0, i_ctype);
frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
+ datacl = PQgetvalue(res, 0, i_datacl);
+ rdatacl = PQgetvalue(res, 0, i_rdatacl);
+ datistemplate = PQgetvalue(res, 0, i_datistemplate);
+ datconnlimit = PQgetvalue(res, 0, i_datconnlimit);
tablespace = PQgetvalue(res, 0, i_tablespace);
+ /*
+ * Prepare the CREATE DATABASE command. We must specify encoding, locale,
+ * and tablespace since those can't be altered later. Other DB properties
+ * are left to the DATABASE PROPERTIES entry, so that they can be applied
+ * after reconnecting to the target DB.
+ */
appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
- fmtId(datname));
+ qdatname);
if (strlen(encoding) > 0)
{
appendPQExpBufferStr(creaQry, " ENCODING = ");
@@ -2655,26 +2709,23 @@ dumpDatabase(Archive *fout)
appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
appendStringLiteralAH(creaQry, ctype, fout);
}
+
+ /*
+ * Note: looking at dopt->outputNoTablespaces here is completely the wrong
+ * thing; the decision whether to specify a tablespace should be left till
+ * pg_restore, so that pg_restore --no-tablespaces applies. Ideally we'd
+ * label the DATABASE entry with the tablespace and let the normal
+ * tablespace selection logic work ... but CREATE DATABASE doesn't pay
+ * attention to default_tablespace, so that won't work.
+ */
if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
!dopt->outputNoTablespaces)
appendPQExpBuffer(creaQry, " TABLESPACE = %s",
fmtId(tablespace));
appendPQExpBufferStr(creaQry, ";\n");
- if (dopt->binary_upgrade)
- {
- appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
- appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
- "SET datfrozenxid = '%u', datminmxid = '%u'\n"
- "WHERE datname = ",
- frozenxid, minmxid);
- appendStringLiteralAH(creaQry, datname, fout);
- appendPQExpBufferStr(creaQry, ";\n");
-
- }
-
appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
- fmtId(datname));
+ qdatname);
dbDumpId = createDumpId();
@@ -2697,7 +2748,7 @@ dumpDatabase(Archive *fout)
NULL); /* Dumper Arg */
/* Compute correct tag for comments etc */
- appendPQExpBuffer(labelq, "DATABASE %s", fmtId(datname));
+ appendPQExpBuffer(labelq, "DATABASE %s", qdatname);
/* Dump DB comment if any */
if (fout->remoteVersion >= 80200)
@@ -2717,7 +2768,7 @@ dumpDatabase(Archive *fout)
* Generates warning when loaded into a differently-named
* database.
*/
- appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", fmtId(datname));
+ appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", qdatname);
appendStringLiteralAH(dbQry, comment, fout);
appendPQExpBufferStr(dbQry, ";\n");
@@ -2758,6 +2809,73 @@ dumpDatabase(Archive *fout)
PQclear(shres);
}
+ /*
+ * Dump ACL if any. Note that we do not support initial privileges
+ * (pg_init_privs) on databases.
+ */
+ dumpACL(fout, dbCatId, dbDumpId, "DATABASE",
+ qdatname, NULL, labelq->data, NULL,
+ dba, datacl, rdatacl, "", "");
+
+ /*
+ * Now construct a DATABASE PROPERTIES archive entry to restore any
+ * non-default database-level properties. We want to do this after
+ * reconnecting so that these properties won't apply during the restore
+ * session. In this way, restoring works even if there is, say, an ALTER
+ * DATABASE SET that turns on default_transaction_read_only.
+ */
+ resetPQExpBuffer(creaQry);
+ resetPQExpBuffer(delQry);
+
+ if (strlen(datconnlimit) > 0 && strcmp(datconnlimit, "-1") != 0)
+ appendPQExpBuffer(creaQry, "ALTER DATABASE %s CONNECTION LIMIT = %s;\n",
+ qdatname, datconnlimit);
+
+ if (strcmp(datistemplate, "t") == 0)
+ {
+ appendPQExpBuffer(creaQry, "ALTER DATABASE %s IS_TEMPLATE = true;\n",
+ qdatname);
+
+ /*
+ * The backend won't accept DROP DATABASE on a template database. We
+ * can deal with that by removing the template marking before the DROP
+ * gets issued. We'd prefer to use ALTER DATABASE IF EXISTS here, but
+ * since no such command is currently supported, fake it with a direct
+ * UPDATE on pg_database.
+ */
+ appendPQExpBufferStr(delQry, "UPDATE pg_catalog.pg_database "
+ "SET datistemplate = false WHERE datname = ");
+ appendStringLiteralAH(delQry, datname, fout);
+ appendPQExpBufferStr(delQry, ";\n");
+ }
+
+ /* Add database-specific SET options */
+ dumpDatabaseConfig(fout, creaQry, datname, dbCatId.oid);
+
+ /*
+ * We stick this binary-upgrade query into the DATABASE PROPERTIES archive
+ * entry, too. It can't go into the DATABASE entry because that would
+ * result in an implicit transaction block around the CREATE DATABASE.
+ */
+ if (dopt->binary_upgrade)
+ {
+ appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
+ appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
+ "SET datfrozenxid = '%u', datminmxid = '%u'\n"
+ "WHERE datname = ",
+ frozenxid, minmxid);
+ appendStringLiteralAH(creaQry, datname, fout);
+ appendPQExpBufferStr(creaQry, ";\n");
+ }
+
+ if (creaQry->len > 0)
+ ArchiveEntry(fout, nilCatalogId, createDumpId(),
+ datname, NULL, NULL, dba,
+ false, "DATABASE PROPERTIES", SECTION_PRE_DATA,
+ creaQry->data, delQry->data, NULL,
+ &(dbDumpId), 1,
+ NULL, NULL);
+
/*
* pg_largeobject and pg_largeobject_metadata come from the old system
* intact, so set their relfrozenxids and relminmxids.
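A sketch of the ordering this buys us in plain-text output (hypothetical
database "mydb" that has default_transaction_read_only turned on):

    CREATE DATABASE mydb WITH TEMPLATE = template0;
    \connect mydb
    ALTER DATABASE mydb SET default_transaction_read_only = on;  -- DATABASE PROPERTIES
    -- ... schema and data are restored by this same session ...

Because ALTER DATABASE ... SET only affects sessions started later, issuing it
after the reconnect keeps the restore session itself writable; attached to the
DATABASE entry, before the \connect, it would have made the data load fail.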
@@ -2793,8 +2911,8 @@ dumpDatabase(Archive *fout)
appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
"SET relfrozenxid = '%u', relminmxid = '%u'\n"
"WHERE oid = %u;\n",
- atoi(PQgetvalue(lo_res, 0, i_relfrozenxid)),
- atoi(PQgetvalue(lo_res, 0, i_relminmxid)),
+ atooid(PQgetvalue(lo_res, 0, i_relfrozenxid)),
+ atooid(PQgetvalue(lo_res, 0, i_relminmxid)),
LargeObjectRelationId);
ArchiveEntry(fout, nilCatalogId, createDumpId(),
"pg_largeobject", NULL, NULL, "",
@@ -2833,8 +2951,8 @@ dumpDatabase(Archive *fout)
appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
"SET relfrozenxid = '%u', relminmxid = '%u'\n"
"WHERE oid = %u;\n",
- atoi(PQgetvalue(lo_res, 0, i_relfrozenxid)),
- atoi(PQgetvalue(lo_res, 0, i_relminmxid)),
+ atooid(PQgetvalue(lo_res, 0, i_relfrozenxid)),
+ atooid(PQgetvalue(lo_res, 0, i_relminmxid)),
LargeObjectMetadataRelationId);
ArchiveEntry(fout, nilCatalogId, createDumpId(),
"pg_largeobject_metadata", NULL, NULL, "",
@@ -2852,12 +2970,85 @@ dumpDatabase(Archive *fout)
PQclear(res);
+ free(qdatname);
destroyPQExpBuffer(dbQry);
destroyPQExpBuffer(delQry);
destroyPQExpBuffer(creaQry);
destroyPQExpBuffer(labelq);
}
+/*
+ * Collect any database-specific or role-and-database-specific SET options
+ * for this database, and append them to outbuf.
+ */
+static void
+dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
+ const char *dbname, Oid dboid)
+{
+ PGconn *conn = GetConnection(AH);
+ PQExpBuffer buf = createPQExpBuffer();
+ PGresult *res;
+ int count = 1;
+
+ /*
+ * First collect database-specific options. Pre-8.4 server versions lack
+ * unnest(), so we do this the hard way by querying once per subscript.
+ */
+ for (;;)
+ {
+ if (AH->remoteVersion >= 90000)
+ printfPQExpBuffer(buf, "SELECT setconfig[%d] FROM pg_db_role_setting "
+ "WHERE setrole = 0 AND setdatabase = '%u'::oid",
+ count, dboid);
+ else
+ printfPQExpBuffer(buf, "SELECT datconfig[%d] FROM pg_database WHERE oid = '%u'::oid", count, dboid);
+
+ res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
+
+ if (PQntuples(res) == 1 &&
+ !PQgetisnull(res, 0, 0))
+ {
+ makeAlterConfigCommand(conn, PQgetvalue(res, 0, 0),
+ "DATABASE", dbname, NULL, NULL,
+ outbuf);
+ PQclear(res);
+ count++;
+ }
+ else
+ {
+ PQclear(res);
+ break;
+ }
+ }
+
+ /* Now look for role-and-database-specific options */
+ if (AH->remoteVersion >= 90000)
+ {
+ /* Here we can assume we have unnest() */
+ printfPQExpBuffer(buf, "SELECT rolname, unnest(setconfig) "
+ "FROM pg_db_role_setting s, pg_roles r "
+ "WHERE setrole = r.oid AND setdatabase = '%u'::oid",
+ dboid);
+
+ res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
+
+ if (PQntuples(res) > 0)
+ {
+ int i;
+
+ for (i = 0; i < PQntuples(res); i++)
+ makeAlterConfigCommand(conn, PQgetvalue(res, i, 1),
+ "ROLE", PQgetvalue(res, i, 0),
+ "DATABASE", dbname,
+ outbuf);
+ }
+
+ PQclear(res);
+ }
+
+ destroyPQExpBuffer(buf);
+}
+
/*
* dumpEncoding: put the correct encoding into the archive
*/
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 3dd2c3871eb..2fd5a025af0 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -38,17 +38,10 @@ static void dumpGroups(PGconn *conn);
static void dropTablespaces(PGconn *conn);
static void dumpTablespaces(PGconn *conn);
static void dropDBs(PGconn *conn);
-static void dumpCreateDB(PGconn *conn);
-static void dumpDatabaseConfig(PGconn *conn, const char *dbname);
static void dumpUserConfig(PGconn *conn, const char *username);
-static void dumpDbRoleConfig(PGconn *conn);
-static void makeAlterConfigCommand(PGconn *conn, const char *arrayitem,
- const char *type, const char *name, const char *type2,
- const char *name2);
static void dumpDatabases(PGconn *conn);
static void dumpTimestamp(const char *msg);
-
-static int runPgDump(const char *dbname);
+static int runPgDump(const char *dbname, const char *create_opts);
static void buildShSecLabels(PGconn *conn, const char *catalog_name,
uint32 objectId, PQExpBuffer buffer,
const char *target, const char *objname);
@@ -62,6 +55,7 @@ static char pg_dump_bin[MAXPGPATH];
static const char *progname;
static PQExpBuffer pgdumpopts;
static char *connstr = "";
+static bool output_clean = false;
static bool skip_acls = false;
static bool verbose = false;
static bool dosync = true;
@@ -152,7 +146,6 @@ main(int argc, char *argv[])
trivalue prompt_password = TRI_DEFAULT;
bool data_only = false;
bool globals_only = false;
- bool output_clean = false;
bool roles_only = false;
bool tablespaces_only = false;
PGconn *conn;
@@ -558,17 +551,6 @@ main(int argc, char *argv[])
/* Dump tablespaces */
if (!roles_only && !no_tablespaces)
dumpTablespaces(conn);
-
- /* Dump CREATE DATABASE commands */
- if (binary_upgrade || (!globals_only && !roles_only && !tablespaces_only))
- dumpCreateDB(conn);
-
- /* Dump role/database settings */
- if (!tablespaces_only && !roles_only)
- {
- if (server_version >= 90000)
- dumpDbRoleConfig(conn);
- }
}
if (!globals_only && !roles_only && !tablespaces_only)
@@ -1262,8 +1244,6 @@ dumpTablespaces(PGconn *conn)
/*
* Dump commands to drop each database.
- *
- * This should match the set of databases targeted by dumpCreateDB().
*/
static void
dropDBs(PGconn *conn)
@@ -1271,24 +1251,30 @@ dropDBs(PGconn *conn)
PGresult *res;
int i;
+ /*
+ * Skip databases marked not datallowconn, since we'd be unable to connect
+ * to them anyway. This must agree with dumpDatabases().
+ */
res = executeQuery(conn,
"SELECT datname "
"FROM pg_database d "
- "WHERE datallowconn ORDER BY 1");
+ "WHERE datallowconn "
+ "ORDER BY datname");
if (PQntuples(res) > 0)
- fprintf(OPF, "--\n-- Drop databases\n--\n\n");
+ fprintf(OPF, "--\n-- Drop databases (except postgres and template1)\n--\n\n");
for (i = 0; i < PQntuples(res); i++)
{
char *dbname = PQgetvalue(res, i, 0);
/*
- * Skip "template1" and "postgres"; the restore script is almost
- * certainly going to be run in one or the other, and we don't know
- * which. This must agree with dumpCreateDB's choices!
+ * Skip "postgres" and "template1"; dumpDatabases() will deal with
+ * them specially. Also, be sure to skip "template0", even if for
+ * some reason it's not marked !datallowconn.
*/
if (strcmp(dbname, "template1") != 0 &&
+ strcmp(dbname, "template0") != 0 &&
strcmp(dbname, "postgres") != 0)
{
fprintf(OPF, "DROP DATABASE %s%s;\n",
@@ -1302,323 +1288,6 @@ dropDBs(PGconn *conn)
fprintf(OPF, "\n\n");
}
-/*
- * Dump commands to create each database.
- *
- * To minimize the number of reconnections (and possibly ensuing
- * password prompts) required by the output script, we emit all CREATE
- * DATABASE commands during the initial phase of the script, and then
- * run pg_dump for each database to dump the contents of that
- * database. We skip databases marked not datallowconn, since we'd be
- * unable to connect to them anyway (and besides, we don't want to
- * dump template0).
- */
-static void
-dumpCreateDB(PGconn *conn)
-{
- PQExpBuffer buf = createPQExpBuffer();
- char *default_encoding = NULL;
- char *default_collate = NULL;
- char *default_ctype = NULL;
- PGresult *res;
- int i;
-
- fprintf(OPF, "--\n-- Database creation\n--\n\n");
-
- /*
- * First, get the installation's default encoding and locale information.
- * We will dump encoding and locale specifications in the CREATE DATABASE
- * commands for just those databases with values different from defaults.
- *
- * We consider template0's encoding and locale to define the installation
- * default. Pre-8.4 installations do not have per-database locale
- * settings; for them, every database must necessarily be using the
- * installation default, so there's no need to do anything.
- */
- if (server_version >= 80400)
- res = executeQuery(conn,
- "SELECT pg_encoding_to_char(encoding), "
- "datcollate, datctype "
- "FROM pg_database "
- "WHERE datname = 'template0'");
- else
- res = executeQuery(conn,
- "SELECT pg_encoding_to_char(encoding), "
- "null::text AS datcollate, null::text AS datctype "
- "FROM pg_database "
- "WHERE datname = 'template0'");
-
- /* If for some reason the template DB isn't there, treat as unknown */
- if (PQntuples(res) > 0)
- {
- if (!PQgetisnull(res, 0, 0))
- default_encoding = pg_strdup(PQgetvalue(res, 0, 0));
- if (!PQgetisnull(res, 0, 1))
- default_collate = pg_strdup(PQgetvalue(res, 0, 1));
- if (!PQgetisnull(res, 0, 2))
- default_ctype = pg_strdup(PQgetvalue(res, 0, 2));
- }
-
- PQclear(res);
-
-
- /*
- * Now collect all the information about databases to dump.
- *
- * For the database ACLs, as of 9.6, we extract both the positive (as
- * datacl) and negative (as rdatacl) ACLs, relative to the default ACL for
- * databases, which are then passed to buildACLCommands() below.
- *
- * See buildACLQueries() and buildACLCommands().
- *
- * Note that we do not support initial privileges (pg_init_privs) on
- * databases.
- */
- if (server_version >= 90600)
- printfPQExpBuffer(buf,
- "SELECT datname, "
- "coalesce(rolname, (select rolname from %s where oid=(select datdba from pg_database where datname='template0'))), "
- "pg_encoding_to_char(d.encoding), "
- "datcollate, datctype, datfrozenxid, datminmxid, "
- "datistemplate, "
- "(SELECT pg_catalog.array_agg(acl ORDER BY acl::text COLLATE \"C\") FROM ( "
- " SELECT pg_catalog.unnest(coalesce(datacl,pg_catalog.acldefault('d',datdba))) AS acl "
- " EXCEPT SELECT pg_catalog.unnest(pg_catalog.acldefault('d',datdba))) as datacls)"
- "AS datacl, "
- "(SELECT pg_catalog.array_agg(acl ORDER BY acl::text COLLATE \"C\") FROM ( "
- " SELECT pg_catalog.unnest(pg_catalog.acldefault('d',datdba)) AS acl "
- " EXCEPT SELECT pg_catalog.unnest(coalesce(datacl,pg_catalog.acldefault('d',datdba)))) as rdatacls)"
- "AS rdatacl, "
- "datconnlimit, "
- "(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
- "FROM pg_database d LEFT JOIN %s u ON (datdba = u.oid) "
- "WHERE datallowconn ORDER BY 1", role_catalog, role_catalog);
- else if (server_version >= 90300)
- printfPQExpBuffer(buf,
- "SELECT datname, "
- "coalesce(rolname, (select rolname from %s where oid=(select datdba from pg_database where datname='template0'))), "
- "pg_encoding_to_char(d.encoding), "
- "datcollate, datctype, datfrozenxid, datminmxid, "
- "datistemplate, datacl, '' as rdatacl, "
- "datconnlimit, "
- "(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
- "FROM pg_database d LEFT JOIN %s u ON (datdba = u.oid) "
- "WHERE datallowconn ORDER BY 1", role_catalog, role_catalog);
- else if (server_version >= 80400)
- printfPQExpBuffer(buf,
- "SELECT datname, "
- "coalesce(rolname, (select rolname from %s where oid=(select datdba from pg_database where datname='template0'))), "
- "pg_encoding_to_char(d.encoding), "
- "datcollate, datctype, datfrozenxid, 0 AS datminmxid, "
- "datistemplate, datacl, '' as rdatacl, "
- "datconnlimit, "
- "(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
- "FROM pg_database d LEFT JOIN %s u ON (datdba = u.oid) "
- "WHERE datallowconn ORDER BY 1", role_catalog, role_catalog);
- else if (server_version >= 80100)
- printfPQExpBuffer(buf,
- "SELECT datname, "
- "coalesce(rolname, (select rolname from %s where oid=(select datdba from pg_database where datname='template0'))), "
- "pg_encoding_to_char(d.encoding), "
- "null::text AS datcollate, null::text AS datctype, datfrozenxid, 0 AS datminmxid, "
- "datistemplate, datacl, '' as rdatacl, "
- "datconnlimit, "
- "(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
- "FROM pg_database d LEFT JOIN %s u ON (datdba = u.oid) "
- "WHERE datallowconn ORDER BY 1", role_catalog, role_catalog);
- else
- printfPQExpBuffer(buf,
- "SELECT datname, "
- "coalesce(usename, (select usename from pg_shadow where usesysid=(select datdba from pg_database where datname='template0'))), "
- "pg_encoding_to_char(d.encoding), "
- "null::text AS datcollate, null::text AS datctype, datfrozenxid, 0 AS datminmxid, "
- "datistemplate, datacl, '' as rdatacl, "
- "-1 as datconnlimit, "
- "(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
- "FROM pg_database d LEFT JOIN pg_shadow u ON (datdba = usesysid) "
- "WHERE datallowconn ORDER BY 1");
-
- res = executeQuery(conn, buf->data);
-
- for (i = 0; i < PQntuples(res); i++)
- {
- char *dbname = PQgetvalue(res, i, 0);
- char *dbowner = PQgetvalue(res, i, 1);
- char *dbencoding = PQgetvalue(res, i, 2);
- char *dbcollate = PQgetvalue(res, i, 3);
- char *dbctype = PQgetvalue(res, i, 4);
- uint32 dbfrozenxid = atooid(PQgetvalue(res, i, 5));
- uint32 dbminmxid = atooid(PQgetvalue(res, i, 6));
- char *dbistemplate = PQgetvalue(res, i, 7);
- char *dbacl = PQgetvalue(res, i, 8);
- char *rdbacl = PQgetvalue(res, i, 9);
- char *dbconnlimit = PQgetvalue(res, i, 10);
- char *dbtablespace = PQgetvalue(res, i, 11);
- char *fdbname;
-
- fdbname = pg_strdup(fmtId(dbname));
-
- resetPQExpBuffer(buf);
-
- /*
- * Skip the CREATE DATABASE commands for "template1" and "postgres",
- * since they are presumably already there in the destination cluster.
- * We do want to emit their ACLs and config options if any, however.
- */
- if (strcmp(dbname, "template1") != 0 &&
- strcmp(dbname, "postgres") != 0)
- {
- appendPQExpBuffer(buf, "CREATE DATABASE %s", fdbname);
-
- appendPQExpBufferStr(buf, " WITH TEMPLATE = template0");
-
- if (strlen(dbowner) != 0)
- appendPQExpBuffer(buf, " OWNER = %s", fmtId(dbowner));
-
- if (default_encoding && strcmp(dbencoding, default_encoding) != 0)
- {
- appendPQExpBufferStr(buf, " ENCODING = ");
- appendStringLiteralConn(buf, dbencoding, conn);
- }
-
- if (default_collate && strcmp(dbcollate, default_collate) != 0)
- {
- appendPQExpBufferStr(buf, " LC_COLLATE = ");
- appendStringLiteralConn(buf, dbcollate, conn);
- }
-
- if (default_ctype && strcmp(dbctype, default_ctype) != 0)
- {
- appendPQExpBufferStr(buf, " LC_CTYPE = ");
- appendStringLiteralConn(buf, dbctype, conn);
- }
-
- /*
- * Output tablespace if it isn't the default. For default, it
- * uses the default from the template database. If tablespace is
- * specified and tablespace creation failed earlier, (e.g. no such
- * directory), the database creation will fail too. One solution
- * would be to use 'SET default_tablespace' like we do in pg_dump
- * for setting non-default database locations.
- */
- if (strcmp(dbtablespace, "pg_default") != 0 && !no_tablespaces)
- appendPQExpBuffer(buf, " TABLESPACE = %s",
- fmtId(dbtablespace));
-
- if (strcmp(dbistemplate, "t") == 0)
- appendPQExpBuffer(buf, " IS_TEMPLATE = true");
-
- if (strcmp(dbconnlimit, "-1") != 0)
- appendPQExpBuffer(buf, " CONNECTION LIMIT = %s",
- dbconnlimit);
-
- appendPQExpBufferStr(buf, ";\n");
- }
- else if (strcmp(dbtablespace, "pg_default") != 0 && !no_tablespaces)
- {
- /*
- * Cannot change tablespace of the database we're connected to, so
- * to move "postgres" to another tablespace, we connect to
- * "template1", and vice versa.
- */
- if (strcmp(dbname, "postgres") == 0)
- appendPQExpBuffer(buf, "\\connect template1\n");
- else
- appendPQExpBuffer(buf, "\\connect postgres\n");
-
- appendPQExpBuffer(buf, "ALTER DATABASE %s SET TABLESPACE %s;\n",
- fdbname, fmtId(dbtablespace));
-
- /* connect to original database */
- appendPsqlMetaConnect(buf, dbname);
- }
-
- if (binary_upgrade)
- {
- appendPQExpBufferStr(buf, "-- For binary upgrade, set datfrozenxid and datminmxid.\n");
- appendPQExpBuffer(buf, "UPDATE pg_catalog.pg_database "
- "SET datfrozenxid = '%u', datminmxid = '%u' "
- "WHERE datname = ",
- dbfrozenxid, dbminmxid);
- appendStringLiteralConn(buf, dbname, conn);
- appendPQExpBufferStr(buf, ";\n");
- }
-
- if (!skip_acls &&
- !buildACLCommands(fdbname, NULL, "DATABASE",
- dbacl, rdbacl, dbowner,
- "", server_version, buf))
- {
- fprintf(stderr, _("%s: could not parse ACL list (%s) for database \"%s\"\n"),
- progname, dbacl, fdbname);
- PQfinish(conn);
- exit_nicely(1);
- }
-
- fprintf(OPF, "%s", buf->data);
-
- dumpDatabaseConfig(conn, dbname);
-
- free(fdbname);
- }
-
- if (default_encoding)
- free(default_encoding);
- if (default_collate)
- free(default_collate);
- if (default_ctype)
- free(default_ctype);
-
- PQclear(res);
- destroyPQExpBuffer(buf);
-
- fprintf(OPF, "\n\n");
-}
-
-
-/*
- * Dump database-specific configuration
- */
-static void
-dumpDatabaseConfig(PGconn *conn, const char *dbname)
-{
- PQExpBuffer buf = createPQExpBuffer();
- int count = 1;
-
- for (;;)
- {
- PGresult *res;
-
- if (server_version >= 90000)
- printfPQExpBuffer(buf, "SELECT setconfig[%d] FROM pg_db_role_setting WHERE "
- "setrole = 0 AND setdatabase = (SELECT oid FROM pg_database WHERE datname = ", count);
- else
- printfPQExpBuffer(buf, "SELECT datconfig[%d] FROM pg_database WHERE datname = ", count);
- appendStringLiteralConn(buf, dbname, conn);
-
- if (server_version >= 90000)
- appendPQExpBufferChar(buf, ')');
-
- res = executeQuery(conn, buf->data);
- if (PQntuples(res) == 1 &&
- !PQgetisnull(res, 0, 0))
- {
- makeAlterConfigCommand(conn, PQgetvalue(res, 0, 0),
- "DATABASE", dbname, NULL, NULL);
- PQclear(res);
- count++;
- }
- else
- {
- PQclear(res);
- break;
- }
- }
-
- destroyPQExpBuffer(buf);
-}
-
-
/*
* Dump user-specific configuration
@@ -1649,8 +1318,11 @@ dumpUserConfig(PGconn *conn, const char *username)
if (PQntuples(res) == 1 &&
!PQgetisnull(res, 0, 0))
{
+ resetPQExpBuffer(buf);
makeAlterConfigCommand(conn, PQgetvalue(res, 0, 0),
- "ROLE", username, NULL, NULL);
+ "ROLE", username, NULL, NULL,
+ buf);
+ fprintf(OPF, "%s", buf->data);
PQclear(res);
count++;
}
@@ -1665,85 +1337,6 @@ dumpUserConfig(PGconn *conn, const char *username)
}
-/*
- * Dump user-and-database-specific configuration
- */
-static void
-dumpDbRoleConfig(PGconn *conn)
-{
- PQExpBuffer buf = createPQExpBuffer();
- PGresult *res;
- int i;
-
- printfPQExpBuffer(buf, "SELECT rolname, datname, unnest(setconfig) "
- "FROM pg_db_role_setting, %s u, pg_database "
- "WHERE setrole = u.oid AND setdatabase = pg_database.oid", role_catalog);
- res = executeQuery(conn, buf->data);
-
- if (PQntuples(res) > 0)
- {
- fprintf(OPF, "--\n-- Per-Database Role Settings \n--\n\n");
-
- for (i = 0; i < PQntuples(res); i++)
- {
- makeAlterConfigCommand(conn, PQgetvalue(res, i, 2),
- "ROLE", PQgetvalue(res, i, 0),
- "DATABASE", PQgetvalue(res, i, 1));
- }
-
- fprintf(OPF, "\n\n");
- }
-
- PQclear(res);
- destroyPQExpBuffer(buf);
-}
-
-
-/*
- * Helper function for dumpXXXConfig().
- */
-static void
-makeAlterConfigCommand(PGconn *conn, const char *arrayitem,
- const char *type, const char *name,
- const char *type2, const char *name2)
-{
- char *pos;
- char *mine;
- PQExpBuffer buf;
-
- mine = pg_strdup(arrayitem);
- pos = strchr(mine, '=');
- if (pos == NULL)
- {
- free(mine);
- return;
- }
-
- buf = createPQExpBuffer();
-
- *pos = 0;
- appendPQExpBuffer(buf, "ALTER %s %s ", type, fmtId(name));
- if (type2 != NULL && name2 != NULL)
- appendPQExpBuffer(buf, "IN %s %s ", type2, fmtId(name2));
- appendPQExpBuffer(buf, "SET %s TO ", fmtId(mine));
-
- /*
- * Some GUC variable names are 'LIST' type and hence must not be quoted.
- */
- if (pg_strcasecmp(mine, "DateStyle") == 0
- || pg_strcasecmp(mine, "search_path") == 0)
- appendPQExpBufferStr(buf, pos + 1);
- else
- appendStringLiteralConn(buf, pos + 1, conn);
- appendPQExpBufferStr(buf, ";\n");
-
- fprintf(OPF, "%s", buf->data);
- destroyPQExpBuffer(buf);
- free(mine);
-}
-
-
-
/*
* Dump contents of databases.
*/
@@ -1753,38 +1346,62 @@ dumpDatabases(PGconn *conn)
PGresult *res;
int i;
- res = executeQuery(conn, "SELECT datname FROM pg_database WHERE datallowconn ORDER BY 1");
+ /*
+ * Skip databases marked not datallowconn, since we'd be unable to connect
+ * to them anyway. This must agree with dropDBs().
+ *
+ * We arrange for template1 to be processed first, then we process other
+ * DBs in alphabetical order. If we just did them all alphabetically, we
+ * might find ourselves trying to drop the "postgres" database while still
+ * connected to it. This makes trying to run the restore script while
+ * connected to "template1" a bad idea, but there's no fixed order that
+ * doesn't have some failure mode with --clean.
+ */
+ res = executeQuery(conn,
+ "SELECT datname "
+ "FROM pg_database d "
+ "WHERE datallowconn "
+ "ORDER BY (datname <> 'template1'), datname");
for (i = 0; i < PQntuples(res); i++)
{
+ char *dbname = PQgetvalue(res, i, 0);
+ const char *create_opts;
int ret;
- char *dbname = PQgetvalue(res, i, 0);
- PQExpBufferData connectbuf;
+ /* Skip template0, even if it's not marked !datallowconn. */
+ if (strcmp(dbname, "template0") == 0)
+ continue;
if (verbose)
fprintf(stderr, _("%s: dumping database \"%s\"...\n"), progname, dbname);
- initPQExpBuffer(&connectbuf);
- appendPsqlMetaConnect(&connectbuf, dbname);
- fprintf(OPF, "%s\n", connectbuf.data);
- termPQExpBuffer(&connectbuf);
-
/*
- * Restore will need to write to the target cluster. This connection
- * setting is emitted for pg_dumpall rather than in the code also used
- * by pg_dump, so that a cluster with databases or users which have
- * this flag turned on can still be replicated through pg_dumpall
- * without editing the file or stream. With pg_dump there are many
- * other ways to allow the file to be used, and leaving it out allows
- * users to protect databases from being accidental restore targets.
+ * We assume that "template1" and "postgres" already exist in the
+ * target installation. dropDBs() won't have removed them, for fear
+ * of removing the DB the restore script is initially connected to. If
+ * --clean was specified, tell pg_dump to drop and recreate them;
+ * otherwise we'll merely restore their contents. Other databases
+ * should simply be created.
*/
- fprintf(OPF, "SET default_transaction_read_only = off;\n\n");
+ if (strcmp(dbname, "template1") == 0 || strcmp(dbname, "postgres") == 0)
+ {
+ if (output_clean)
+ create_opts = "--clean --create";
+ else
+ {
+ create_opts = "";
+ /* Since pg_dump won't emit a \connect command, we must */
+ fprintf(OPF, "\\connect %s\n\n", dbname);
+ }
+ }
+ else
+ create_opts = "--create";
if (filename)
fclose(OPF);
- ret = runPgDump(dbname);
+ ret = runPgDump(dbname, create_opts);
if (ret != 0)
{
fprintf(stderr, _("%s: pg_dump failed on database \"%s\", exiting\n"), progname, dbname);
@@ -1810,17 +1427,17 @@ dumpDatabases(PGconn *conn)
/*
- * Run pg_dump on dbname.
+ * Run pg_dump on dbname, with specified options.
*/
static int
-runPgDump(const char *dbname)
+runPgDump(const char *dbname, const char *create_opts)
{
PQExpBuffer connstrbuf = createPQExpBuffer();
PQExpBuffer cmd = createPQExpBuffer();
int ret;
- appendPQExpBuffer(cmd, "\"%s\" %s", pg_dump_bin,
- pgdumpopts->data);
+ appendPQExpBuffer(cmd, "\"%s\" %s %s", pg_dump_bin,
+ pgdumpopts->data, create_opts);
/*
* If we have a filename, use the undocumented plain-append pg_dump
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index fce1465c118..74730bfc659 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -1516,11 +1516,14 @@ qr/^ALTER (?!EVENT TRIGGER|LARGE OBJECT|PUBLICATION|SUBSCRIPTION)(.*) OWNER TO .
all_runs => 1,
catch_all => 'COMMENT commands',
regexp => qr/^COMMENT ON DATABASE postgres IS .*;/m,
- like => {
+ # Should appear in the same tests as "CREATE DATABASE postgres"
+ like => { createdb => 1, },
+ unlike => {
binary_upgrade => 1,
clean => 1,
clean_if_exists => 1,
- createdb => 1,
+ column_inserts => 1,
+ data_only => 1,
defaults => 1,
exclude_dump_test_schema => 1,
exclude_test_table => 1,
@@ -1528,18 +1531,18 @@ qr/^ALTER (?!EVENT TRIGGER|LARGE OBJECT|PUBLICATION|SUBSCRIPTION)(.*) OWNER TO .
no_blobs => 1,
no_privs => 1,
no_owner => 1,
+ only_dump_test_schema => 1,
+ only_dump_test_table => 1,
pg_dumpall_dbprivs => 1,
+ pg_dumpall_globals => 1,
+ pg_dumpall_globals_clean => 1,
+ role => 1,
schema_only => 1,
section_pre_data => 1,
- with_oids => 1, },
- unlike => {
- column_inserts => 1,
- data_only => 1,
- only_dump_test_schema => 1,
- only_dump_test_table => 1,
- role => 1,
- section_post_data => 1,
- test_schema_plus_blobs => 1, }, },
+ section_data => 1,
+ section_post_data => 1,
+ test_schema_plus_blobs => 1,
+ with_oids => 1, }, },
'COMMENT ON EXTENSION plpgsql' => {
all_runs => 1,
diff --git a/src/bin/pg_upgrade/pg_upgrade.c b/src/bin/pg_upgrade/pg_upgrade.c
index 872621489f2..a67e484a857 100644
--- a/src/bin/pg_upgrade/pg_upgrade.c
+++ b/src/bin/pg_upgrade/pg_upgrade.c
@@ -46,7 +46,7 @@
#endif
static void prepare_new_cluster(void);
-static void prepare_new_databases(void);
+static void prepare_new_globals(void);
static void create_new_objects(void);
static void copy_xact_xlog_xid(void);
static void set_frozenxids(bool minmxid_only);
@@ -124,7 +124,7 @@ main(int argc, char **argv)
/* -- NEW -- */
start_postmaster(&new_cluster, true);
- prepare_new_databases();
+ prepare_new_globals();
create_new_objects();
@@ -271,7 +271,7 @@ prepare_new_cluster(void)
static void
-prepare_new_databases(void)
+prepare_new_globals(void)
{
/*
* We set autovacuum_freeze_max_age to its maximum value so autovacuum
@@ -283,20 +283,11 @@ prepare_new_databases(void)
prep_status("Restoring global objects in the new cluster");
- /*
- * We have to create the databases first so we can install support
- * functions in all the other databases. Ideally we could create the
- * support functions in template1 but pg_dumpall creates database using
- * the template0 template.
- */
exec_prog(UTILITY_LOG_FILE, NULL, true, true,
"\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
new_cluster.bindir, cluster_conn_opts(&new_cluster),
GLOBALS_DUMP_FILE);
check_ok();
-
- /* we load this to get a current list of databases */
- get_db_and_rel_infos(&new_cluster);
}
@@ -312,33 +303,40 @@ create_new_objects(void)
char sql_file_name[MAXPGPATH],
log_file_name[MAXPGPATH];
DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum];
- PQExpBufferData connstr,
- escaped_connstr;
-
- initPQExpBuffer(&connstr);
- appendPQExpBuffer(&connstr, "dbname=");
- appendConnStrVal(&connstr, old_db->db_name);
- initPQExpBuffer(&escaped_connstr);
- appendShellString(&escaped_connstr, connstr.data);
- termPQExpBuffer(&connstr);
+ const char *create_opts;
+ const char *starting_db;
pg_log(PG_STATUS, "%s", old_db->db_name);
snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);
/*
- * pg_dump only produces its output at the end, so there is little
- * parallelism if using the pipe.
+ * template1 and postgres databases will already exist in the target
+ * installation, so tell pg_restore to drop and recreate them;
+ * otherwise we would fail to propagate their database-level
+ * properties.
*/
+ if (strcmp(old_db->db_name, "template1") == 0 ||
+ strcmp(old_db->db_name, "postgres") == 0)
+ create_opts = "--clean --create";
+ else
+ create_opts = "--create";
+
+ /* When processing template1, we can't connect there to start with */
+ if (strcmp(old_db->db_name, "template1") == 0)
+ starting_db = "postgres";
+ else
+ starting_db = "template1";
+
parallel_exec_prog(log_file_name,
NULL,
- "\"%s/pg_restore\" %s --exit-on-error --verbose --dbname %s \"%s\"",
+ "\"%s/pg_restore\" %s %s --exit-on-error --verbose "
+ "--dbname %s \"%s\"",
new_cluster.bindir,
cluster_conn_opts(&new_cluster),
- escaped_connstr.data,
+ create_opts,
+ starting_db,
sql_file_name);
-
- termPQExpBuffer(&escaped_connstr);
}
/* reap all children */
@@ -355,7 +353,7 @@ create_new_objects(void)
if (GET_MAJOR_VERSION(old_cluster.major_version) < 903)
set_frozenxids(true);
- /* regenerate now that we have objects in the databases */
+ /* update new_cluster info now that we have objects in the databases */
get_db_and_rel_infos(&new_cluster);
}