From 2ea624b4b51caa0e82a4084d2499f5fc72cbe418 Mon Sep 17 00:00:00 2001
From: Tom Lane <tgl@sss.pgh.pa.us>
Date: Wed, 7 Oct 2020 12:50:54 -0400
Subject: [PATCH] Rethink recent fix for pg_dump's handling of extension
 config tables.

Commit 3eb3d3e78 was a few bricks shy of a load: while it correctly
set the table's "interesting" flag when deciding to dump the data of
an extension config table, it was not correct to clear that flag if
we concluded we shouldn't dump the data.  This led to the crash
reported in bug #16655, because in fact we'll traverse dumpTableSchema
anyway for all extension tables (to see if they have user-added
seclabels or RLS policies).

The right thing to do is to force "interesting" true in
makeTableDataInfo, and otherwise leave the flag alone.  (Doing it
there is more future-proof in case additional calls are added, and it
also avoids setting the flag unnecessarily if that function decides
the table is non-dumpable.)

This investigation also showed that while only the --inserts code
path had an obvious failure in the case considered by 3eb3d3e78, the
COPY code path also has a problem with not having loaded table
subsidiary data.  That causes fmtCopyColumnList to silently return an
empty string instead of the correct column list.  That accidentally
mostly works, which perhaps is why we didn't notice this before.  It
would only fail if the restore column order is different from the
dump column order, which only happens in weird inheritance cases, so
it's not surprising nobody had hit the case with an extension config
table.  Nonetheless, it's a bug, and it goes a long way back, not
just to v12 where the --inserts code path started to have a problem
with this.

In hopes of catching such cases a bit sooner in future, add some
Asserts that "interesting" has been set in both dumpTableData and
dumpTableSchema.  Adjust the test case added by 3eb3d3e78 so that it
checks the COPY rather than INSERT form of that bug, allowing it to
detect the longer-standing symptom.

Per bug #16655 from Cameron Daniel.  Back-patch to all supported
branches.

Discussion: https://postgr.es/m/16655-5c92d6b3a9438137@postgresql.org
Discussion: https://postgr.es/m/18048b44-3414-b983-8c7c-9165b177900d@2ndQuadrant.com
---
 src/bin/pg_dump/pg_dump.c                   | 14 +++---
 src/test/modules/test_pg_dump/t/001_base.pl | 44 +++++++++++++------
 .../test_pg_dump/test_pg_dump--1.0.sql      |  6 +--
 3 files changed, 42 insertions(+), 22 deletions(-)

diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index fe16a8cc2ee..f81ba7334a3 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -2099,8 +2099,6 @@ dumpTableData_insert(Archive *fout, void *dcontext)
 			if (nfields == 0)
 				continue;
 
-			Assert(tbinfo->attgenerated);
-
 			/* Emit a row heading */
 			if (rows_per_statement == 1)
 				archputs(" (", fout);
@@ -2261,6 +2259,9 @@ dumpTableData(Archive *fout, TableDataInfo *tdinfo)
 	char	   *copyStmt;
 	const char *copyFrom;
 
+	/* We had better have loaded per-column details about this table */
+	Assert(tbinfo->interesting);
+
 	if (!dopt->dump_inserts)
 	{
 		/* Dump/restore using COPY */
@@ -2452,6 +2453,9 @@ makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo)
 	addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
 
 	tbinfo->dataObj = tdinfo;
+
+	/* Make sure that we'll collect per-column info for this table. */
+	tbinfo->interesting = true;
 }
 
 /*
@@ -15772,10 +15776,12 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
 	int			j,
 				k;
 
+	/* We had better have loaded per-column details about this table */
+	Assert(tbinfo->interesting);
+
 	qrelname = pg_strdup(fmtId(tbinfo->dobj.name));
 	qualrelname = pg_strdup(fmtQualifiedDumpable(tbinfo));
 
-
 	if (tbinfo->hasoids)
 		pg_log_warning("WITH OIDS is not supported anymore (table \"%s\")",
 					   qrelname);
@@ -18143,8 +18149,6 @@ processExtensionTables(Archive *fout, ExtensionInfo extinfo[],
 							configtbl->dataObj->filtercond = pg_strdup(extconditionarray[j]);
 					}
 				}
-
-				configtbl->interesting = dumpobj;
 			}
 		}
 		if (extconfigarray)
diff --git a/src/test/modules/test_pg_dump/t/001_base.pl b/src/test/modules/test_pg_dump/t/001_base.pl
index 78aa07ce511..b3227b855c0 100644
--- a/src/test/modules/test_pg_dump/t/001_base.pl
+++ b/src/test/modules/test_pg_dump/t/001_base.pl
@@ -135,9 +135,17 @@ my %pgdump_runs = (
 			"$tempdir/defaults_tar_format.tar",
 		],
 	},
+	exclude_table => {
+		dump_cmd => [
+			'pg_dump',
+			'--exclude-table=regress_table_dumpable',
+			"--file=$tempdir/exclude_table.sql",
+			'postgres',
+		],
+	},
 	extension_schema => {
 		dump_cmd => [
-			'pg_dump', '--schema=public', '--inserts',
+			'pg_dump', '--schema=public',
 			"--file=$tempdir/extension_schema.sql", 'postgres',
 		],
 	},
@@ -225,6 +233,7 @@ my %full_runs = (
 	clean_if_exists => 1,
 	createdb        => 1,
 	defaults        => 1,
+	exclude_table   => 1,
 	no_privs        => 1,
 	no_owner        => 1,);
 
@@ -317,11 +326,28 @@ my %tests = (
 		regexp => qr/^
 			\QCREATE TABLE public.regress_pg_dump_table (\E
 			\n\s+\Qcol1 integer NOT NULL,\E
-			\n\s+\Qcol2 integer\E
+			\n\s+\Qcol2 integer,\E
+			\n\s+\QCONSTRAINT regress_pg_dump_table_col2_check CHECK ((col2 > 0))\E
 			\n\);\n/xm,
 		like => { binary_upgrade => 1, },
 	},
 
+	'COPY public.regress_table_dumpable (col1)' => {
+		regexp => qr/^
+			\QCOPY public.regress_table_dumpable (col1) FROM stdin;\E
+			\n/xm,
+		like => {
+			%full_runs,
+			data_only        => 1,
+			section_data     => 1,
+			extension_schema => 1,
+		},
+		unlike => {
+			binary_upgrade => 1,
+			exclude_table  => 1,
+		},
+	},
+
 	'CREATE ACCESS METHOD regress_test_am' => {
 		regexp => qr/^
 			\QCREATE ACCESS METHOD regress_test_am TYPE INDEX HANDLER bthandler;\E
@@ -443,7 +469,8 @@ my %tests = (
 		regexp => qr/^
 			\QCREATE TABLE regress_pg_dump_schema.test_table (\E
 			\n\s+\Qcol1 integer,\E
-			\n\s+\Qcol2 integer\E
+			\n\s+\Qcol2 integer,\E
+			\n\s+\QCONSTRAINT test_table_col2_check CHECK ((col2 > 0))\E
 			\n\);\n/xm,
 		like => { binary_upgrade => 1, },
 	},
@@ -578,17 +605,6 @@ my %tests = (
 			schema_only => 1,
 			section_pre_data => 1,
 		},
-	},
-
-	# Dumpable object inside specific schema
-	'INSERT INTO public.regress_table_dumpable VALUES (1);' => {
-		create_sql => 'INSERT INTO public.regress_table_dumpable VALUES (1);',
-		regexp => qr/^
-			\QINSERT INTO public.regress_table_dumpable VALUES (1);\E
-			\n/xm,
-		like => {
-			extension_schema => 1,
-		},
 	},);
 
 #########################################
diff --git a/src/test/modules/test_pg_dump/test_pg_dump--1.0.sql b/src/test/modules/test_pg_dump/test_pg_dump--1.0.sql
index 90e461ed357..c7a35c3afa0 100644
--- a/src/test/modules/test_pg_dump/test_pg_dump--1.0.sql
+++ b/src/test/modules/test_pg_dump/test_pg_dump--1.0.sql
@@ -5,7 +5,7 @@
 
 CREATE TABLE regress_pg_dump_table (
 	col1 serial,
-	col2 int
+	col2 int check (col2 > 0)
 );
 
 CREATE SEQUENCE regress_pg_dump_seq;
@@ -14,7 +14,7 @@ CREATE SEQUENCE regress_seq_dumpable;
 SELECT pg_catalog.pg_extension_config_dump('regress_seq_dumpable', '');
 
 CREATE TABLE regress_table_dumpable (
-	col1 int
+	col1 int check (col1 > 0)
 );
 SELECT pg_catalog.pg_extension_config_dump('regress_table_dumpable', '');
 
@@ -34,7 +34,7 @@ CREATE ACCESS METHOD regress_test_am TYPE INDEX HANDLER bthandler;
 -- this extension.
 CREATE TABLE regress_pg_dump_schema.test_table (
 	col1 int,
-	col2 int
+	col2 int check (col2 > 0)
 );
 
 GRANT SELECT ON regress_pg_dump_schema.test_table TO regress_dump_test_role;
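For context on the mechanism this patch touches: an extension nominates one of
its tables' contents for dumping by calling pg_catalog.pg_extension_config_dump(),
exactly as the test extension above does for regress_table_dumpable.  A minimal
sketch, assuming a hypothetical extension script my_ext--1.0.sql (the extension
and table names are illustrative only, not part of this patch):

    /* my_ext--1.0.sql: hypothetical extension script, loaded by CREATE EXTENSION my_ext */
    CREATE TABLE my_ext_config (
        key text PRIMARY KEY,
        val text
    );

    /* Nominate the table as an extension config table, so pg_dump emits its
     * rows even though its definition is recreated by CREATE EXTENSION rather
     * than dumped.  The second argument is an optional WHERE condition ('' means
     * dump all rows); processExtensionTables above copies that condition into
     * the table's dataObj->filtercond. */
    SELECT pg_catalog.pg_extension_config_dump('my_ext_config', '');

With that call in place, pg_dump writes the table's rows via COPY by default,
or via INSERT when --inserts is given; those are the code paths that require the
table's per-column information to have been loaded, which is what the new
Asserts in dumpTableData and dumpTableSchema check.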