Diffstat (limited to 'src/bin/pg_dump')
 src/bin/pg_dump/meson.build           |   6
 src/bin/pg_dump/pg_backup_archiver.c  |   4
 src/bin/pg_dump/pg_backup_directory.c |  11
 src/bin/pg_dump/pg_dump.c             | 142
 src/bin/pg_dump/pg_dump.h             |   1
 src/bin/pg_dump/pg_dumpall.c          |  10
 src/bin/pg_dump/pg_restore.c          |  49
 src/bin/pg_dump/t/001_basic.pl        |  13
 src/bin/pg_dump/t/002_pg_dump.pl      | 100
 src/bin/pg_dump/t/006_pg_dumpall.pl   |  53
 10 files changed, 295 insertions(+), 94 deletions(-)
diff --git a/src/bin/pg_dump/meson.build b/src/bin/pg_dump/meson.build
index d8e9e101254..4a4ebbd8ec9 100644
--- a/src/bin/pg_dump/meson.build
+++ b/src/bin/pg_dump/meson.build
@@ -91,9 +91,9 @@ tests += {
'bd': meson.current_build_dir(),
'tap': {
'env': {
- 'GZIP_PROGRAM': gzip.found() ? gzip.path() : '',
- 'LZ4': program_lz4.found() ? program_lz4.path() : '',
- 'ZSTD': program_zstd.found() ? program_zstd.path() : '',
+ 'GZIP_PROGRAM': gzip.found() ? gzip.full_path() : '',
+ 'LZ4': program_lz4.found() ? program_lz4.full_path() : '',
+ 'ZSTD': program_zstd.found() ? program_zstd.full_path() : '',
'with_icu': icu.found() ? 'yes' : 'no',
},
'tests': [
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index afa42337b11..197c1295d93 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -152,7 +152,7 @@ InitDumpOptions(DumpOptions *opts)
opts->dumpSections = DUMP_UNSECTIONED;
opts->dumpSchema = true;
opts->dumpData = true;
- opts->dumpStatistics = true;
+ opts->dumpStatistics = false;
}
/*
@@ -2655,7 +2655,7 @@ WriteToc(ArchiveHandle *AH)
pg_fatal("unexpected TOC entry in WriteToc(): %d %s %s",
te->dumpId, te->desc, te->tag);
- if (fseeko(AH->FH, te->defnLen, SEEK_CUR != 0))
+ if (fseeko(AH->FH, te->defnLen, SEEK_CUR) != 0)
pg_fatal("error during file seek: %m");
}
else if (te->defnDumper)
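The second hunk above fixes a misplaced closing parenthesis: the "!= 0" comparison had been absorbed into fseeko's whence argument. The old form still worked by coincidence on common platforms, because SEEK_CUR is typically defined as 1 and the boolean (SEEK_CUR != 0) also evaluates to 1, while fseeko's nonzero error return was still caught by the bare if. A minimal standalone sketch of both forms, using plain stdio rather than the archiver code:

#include <stdio.h>

static void
skip_bytes(FILE *fh, long len)
{
	/* buggy form: the whence argument is the boolean (SEEK_CUR != 0),
	 * which happens to equal SEEK_CUR (1) on typical platforms, so the
	 * seek still worked and the nonzero error return was still tested */
	if (fseek(fh, len, SEEK_CUR != 0))
		perror("seek failed");

	/* fixed form: seek relative to the current position and test the
	 * return value against 0 explicitly */
	if (fseek(fh, len, SEEK_CUR) != 0)
		perror("seek failed");
}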
diff --git a/src/bin/pg_dump/pg_backup_directory.c b/src/bin/pg_dump/pg_backup_directory.c
index 21b00792a8a..bc2a2fb4797 100644
--- a/src/bin/pg_dump/pg_backup_directory.c
+++ b/src/bin/pg_dump/pg_backup_directory.c
@@ -412,10 +412,15 @@ _LoadLOs(ArchiveHandle *AH, TocEntry *te)
/*
* Note: before archive v16, there was always only one BLOBS TOC entry,
- * now there can be multiple. We don't need to worry what version we are
- * reading though, because tctx->filename should be correct either way.
+ * now there can be multiple. Furthermore, although the actual filename
+ * was always "blobs.toc" before v16, the value of tctx->filename did not
+ * match that before commit 548e50976 fixed it. For simplicity we assume
+ * it must be "blobs.toc" in all archives before v16.
*/
- setFilePath(AH, tocfname, tctx->filename);
+ if (AH->version < K_VERS_1_16)
+ setFilePath(AH, tocfname, "blobs.toc");
+ else
+ setFilePath(AH, tocfname, tctx->filename);
CFH = ctx->LOsTocFH = InitDiscoverCompressFileHandle(tocfname, PG_BINARY_R);
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index e2e7975b34e..1937997ea67 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -350,7 +350,9 @@ static void buildMatViewRefreshDependencies(Archive *fout);
static void getTableDataFKConstraints(void);
static void determineNotNullFlags(Archive *fout, PGresult *res, int r,
TableInfo *tbinfo, int j,
- int i_notnull_name, int i_notnull_invalidoid,
+ int i_notnull_name,
+ int i_notnull_comment,
+ int i_notnull_invalidoid,
int i_notnull_noinherit,
int i_notnull_islocal,
PQExpBuffer *invalidnotnulloids);
@@ -1235,7 +1237,7 @@ main(int argc, char **argv)
static void
help(const char *progname)
{
- printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
+ printf(_("%s exports a PostgreSQL database as an SQL script or to other formats.\n\n"), progname);
printf(_("Usage:\n"));
printf(_(" %s [OPTION]... [DBNAME]\n"), progname);
@@ -6890,7 +6892,8 @@ getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages,
(relkind == RELKIND_PARTITIONED_TABLE) ||
(relkind == RELKIND_INDEX) ||
(relkind == RELKIND_PARTITIONED_INDEX) ||
- (relkind == RELKIND_MATVIEW))
+ (relkind == RELKIND_MATVIEW ||
+ relkind == RELKIND_FOREIGN_TABLE))
{
RelStatsInfo *info = pg_malloc0(sizeof(RelStatsInfo));
DumpableObject *dobj = &info->dobj;
@@ -6929,6 +6932,7 @@ getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages,
case RELKIND_RELATION:
case RELKIND_PARTITIONED_TABLE:
case RELKIND_MATVIEW:
+ case RELKIND_FOREIGN_TABLE:
info->section = SECTION_DATA;
break;
case RELKIND_INDEX:
@@ -6936,7 +6940,7 @@ getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages,
info->section = SECTION_POST_DATA;
break;
default:
- pg_fatal("cannot dump statistics for relation kind '%c'",
+ pg_fatal("cannot dump statistics for relation kind \"%c\"",
info->relkind);
}
@@ -9004,6 +9008,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
int i_attalign;
int i_attislocal;
int i_notnull_name;
+ int i_notnull_comment;
int i_notnull_noinherit;
int i_notnull_islocal;
int i_notnull_invalidoid;
@@ -9087,7 +9092,8 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
/*
* Find out any NOT NULL markings for each column. In 18 and up we read
- * pg_constraint to obtain the constraint name. notnull_noinherit is set
+ * pg_constraint to obtain the constraint name, and for valid constraints
+ * also pg_description to obtain its comment. notnull_noinherit is set
* according to the NO INHERIT property. For versions prior to 18, we
* store an empty string as the name when a constraint is marked as
* attnotnull (this cues dumpTableSchema to print the NOT NULL clause
@@ -9095,7 +9101,8 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
*
* For invalid constraints, we need to store their OIDs for processing
* elsewhere, so we bring the pg_constraint.oid value when the constraint
- * is invalid, and NULL otherwise.
+ * is invalid, and NULL otherwise. Their comments are handled not here
+ * but by collectComments, because they're their own dumpable object.
*
* We track in notnull_islocal whether the constraint was defined directly
* in this table or via an ancestor, for binary upgrade. flagInhAttrs
@@ -9105,6 +9112,8 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
if (fout->remoteVersion >= 180000)
appendPQExpBufferStr(q,
"co.conname AS notnull_name,\n"
+ "CASE WHEN co.convalidated THEN pt.description"
+ " ELSE NULL END AS notnull_comment,\n"
"CASE WHEN NOT co.convalidated THEN co.oid "
"ELSE NULL END AS notnull_invalidoid,\n"
"co.connoinherit AS notnull_noinherit,\n"
@@ -9112,6 +9121,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
else
appendPQExpBufferStr(q,
"CASE WHEN a.attnotnull THEN '' ELSE NULL END AS notnull_name,\n"
+ "NULL AS notnull_comment,\n"
"NULL AS notnull_invalidoid,\n"
"false AS notnull_noinherit,\n"
"a.attislocal AS notnull_islocal,\n");
@@ -9155,15 +9165,16 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
/*
* In versions 18 and up, we need pg_constraint for explicit NOT NULL
- * entries. Also, we need to know if the NOT NULL for each column is
- * backing a primary key.
+ * entries and pg_description to get their comments.
*/
if (fout->remoteVersion >= 180000)
appendPQExpBufferStr(q,
" LEFT JOIN pg_catalog.pg_constraint co ON "
"(a.attrelid = co.conrelid\n"
" AND co.contype = 'n' AND "
- "co.conkey = array[a.attnum])\n");
+ "co.conkey = array[a.attnum])\n"
+ " LEFT JOIN pg_catalog.pg_description pt ON "
+ "(pt.classoid = co.tableoid AND pt.objoid = co.oid)\n");
appendPQExpBufferStr(q,
"WHERE a.attnum > 0::pg_catalog.int2\n"
@@ -9187,6 +9198,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
i_attalign = PQfnumber(res, "attalign");
i_attislocal = PQfnumber(res, "attislocal");
i_notnull_name = PQfnumber(res, "notnull_name");
+ i_notnull_comment = PQfnumber(res, "notnull_comment");
i_notnull_invalidoid = PQfnumber(res, "notnull_invalidoid");
i_notnull_noinherit = PQfnumber(res, "notnull_noinherit");
i_notnull_islocal = PQfnumber(res, "notnull_islocal");
@@ -9255,6 +9267,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
tbinfo->attfdwoptions = (char **) pg_malloc(numatts * sizeof(char *));
tbinfo->attmissingval = (char **) pg_malloc(numatts * sizeof(char *));
tbinfo->notnull_constrs = (char **) pg_malloc(numatts * sizeof(char *));
+ tbinfo->notnull_comment = (char **) pg_malloc(numatts * sizeof(char *));
tbinfo->notnull_invalid = (bool *) pg_malloc(numatts * sizeof(bool));
tbinfo->notnull_noinh = (bool *) pg_malloc(numatts * sizeof(bool));
tbinfo->notnull_islocal = (bool *) pg_malloc(numatts * sizeof(bool));
@@ -9286,11 +9299,14 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
determineNotNullFlags(fout, res, r,
tbinfo, j,
i_notnull_name,
+ i_notnull_comment,
i_notnull_invalidoid,
i_notnull_noinherit,
i_notnull_islocal,
&invalidnotnulloids);
+ tbinfo->notnull_comment[j] = PQgetisnull(res, r, i_notnull_comment) ?
+ NULL : pg_strdup(PQgetvalue(res, r, i_notnull_comment));
tbinfo->attoptions[j] = pg_strdup(PQgetvalue(res, r, i_attoptions));
tbinfo->attcollation[j] = atooid(PQgetvalue(res, r, i_attcollation));
tbinfo->attcompression[j] = *(PQgetvalue(res, r, i_attcompression));
@@ -9461,7 +9477,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
int i_consrc;
int i_conislocal;
- pg_log_info("finding invalid not null constraints");
+ pg_log_info("finding invalid not-null constraints");
resetPQExpBuffer(q);
appendPQExpBuffer(q,
@@ -9702,8 +9718,9 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
* 4) The column has a constraint with a known name; in that case
* notnull_constrs carries that name and dumpTableSchema will print
* "CONSTRAINT the_name NOT NULL". However, if the name is the default
- * (table_column_not_null), there's no need to print that name in the dump,
- * so notnull_constrs is set to the empty string and it behaves as case 2.
+ * (table_column_not_null) and there's no comment on the constraint,
+ * there's no need to print that name in the dump, so notnull_constrs
+ * is set to the empty string and it behaves as case 2.
*
* In a child table that inherits from a parent already containing NOT NULL
* constraints and the columns in the child don't have their own NOT NULL
@@ -9730,6 +9747,7 @@ static void
determineNotNullFlags(Archive *fout, PGresult *res, int r,
TableInfo *tbinfo, int j,
int i_notnull_name,
+ int i_notnull_comment,
int i_notnull_invalidoid,
int i_notnull_noinherit,
int i_notnull_islocal,
@@ -9803,11 +9821,13 @@ determineNotNullFlags(Archive *fout, PGresult *res, int r,
{
/*
* In binary upgrade of inheritance child tables, must have a
- * constraint name that we can UPDATE later.
+ * constraint name that we can UPDATE later; same if there's a
+ * comment on the constraint.
*/
- if (dopt->binary_upgrade &&
- !tbinfo->ispartition &&
- !tbinfo->notnull_islocal)
+ if ((dopt->binary_upgrade &&
+ !tbinfo->ispartition &&
+ !tbinfo->notnull_islocal) ||
+ !PQgetisnull(res, r, i_notnull_comment))
{
tbinfo->notnull_constrs[j] =
pstrdup(PQgetvalue(res, r, i_notnull_name));
@@ -10765,6 +10785,9 @@ fetchAttributeStats(Archive *fout)
restarted = true;
}
+ appendPQExpBufferChar(nspnames, '{');
+ appendPQExpBufferChar(relnames, '{');
+
/*
* Scan the TOC for the next set of relevant stats entries. We assume
* that statistics are dumped in the order they are listed in the TOC.
@@ -10776,23 +10799,25 @@ fetchAttributeStats(Archive *fout)
if ((te->reqs & REQ_STATS) != 0 &&
strcmp(te->desc, "STATISTICS DATA") == 0)
{
- appendPQExpBuffer(nspnames, "%s%s", count ? "," : "",
- fmtId(te->namespace));
- appendPQExpBuffer(relnames, "%s%s", count ? "," : "",
- fmtId(te->tag));
+ appendPGArray(nspnames, te->namespace);
+ appendPGArray(relnames, te->tag);
count++;
}
}
+ appendPQExpBufferChar(nspnames, '}');
+ appendPQExpBufferChar(relnames, '}');
+
/* Execute the query for the next batch of relations. */
if (count > 0)
{
PQExpBuffer query = createPQExpBuffer();
- appendPQExpBuffer(query, "EXECUTE getAttributeStats("
- "'{%s}'::pg_catalog.name[],"
- "'{%s}'::pg_catalog.name[])",
- nspnames->data, relnames->data);
+ appendPQExpBufferStr(query, "EXECUTE getAttributeStats(");
+ appendStringLiteralAH(query, nspnames->data, fout);
+ appendPQExpBufferStr(query, "::pg_catalog.name[],");
+ appendStringLiteralAH(query, relnames->data, fout);
+ appendPQExpBufferStr(query, "::pg_catalog.name[])");
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
destroyPQExpBuffer(query);
}
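The hunk above stops splicing fmtId()-quoted names into a hand-written '{...}' literal and instead builds a genuine array value with appendPGArray, then sends it through appendStringLiteralAH, so schema or relation names containing commas, braces, double quotes, or backslashes can no longer break the array syntax or the query. A minimal sketch of the per-element quoting such an array literal needs, assuming only libpq's PQExpBuffer helpers (this mirrors, rather than reproduces, appendPGArray, which lives in the fe_utils string utilities):

#include "pqexpbuffer.h"

/*
 * Append one element to a Postgres array literal being built in "buf",
 * which is assumed to already start with '{': separate elements with
 * commas, wrap each value in double quotes, and backslash-escape any
 * embedded '"' or '\'.
 */
static void
append_array_element(PQExpBuffer buf, const char *val)
{
	if (buf->len > 0 && buf->data[buf->len - 1] != '{')
		appendPQExpBufferChar(buf, ',');
	appendPQExpBufferChar(buf, '"');
	for (const char *p = val; *p; p++)
	{
		if (*p == '"' || *p == '\\')
			appendPQExpBufferChar(buf, '\\');
		appendPQExpBufferChar(buf, *p);
	}
	appendPQExpBufferChar(buf, '"');
}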
@@ -10850,7 +10875,7 @@ dumpRelationStats_dumper(Archive *fout, const void *userArg, const TocEntry *te)
expected_te = expected_te->next;
if (te != expected_te)
- pg_fatal("stats dumped out of order (current: %d %s %s) (expected: %d %s %s)",
+ pg_fatal("statistics dumped out of order (current: %d %s %s, expected: %d %s %s)",
te->dumpId, te->desc, te->tag,
expected_te->dumpId, expected_te->desc, expected_te->tag);
@@ -10924,7 +10949,20 @@ dumpRelationStats_dumper(Archive *fout, const void *userArg, const TocEntry *te)
appendStringLiteralAH(out, rsinfo->dobj.name, fout);
appendPQExpBufferStr(out, ",\n");
appendPQExpBuffer(out, "\t'relpages', '%d'::integer,\n", rsinfo->relpages);
- appendPQExpBuffer(out, "\t'reltuples', '%s'::real,\n", rsinfo->reltuples);
+
+ /*
+ * Before v14, a reltuples value of 0 was ambiguous: it could either mean
+ * the relation is empty, or it could mean that it hadn't yet been
+ * vacuumed or analyzed. (Newer versions use -1 for the latter case.)
+ * This ambiguity allegedly can cause the planner to choose inefficient
+ * plans after restoring to v18 or newer. To deal with this, let's just
+ * set reltuples to -1 in that case.
+ */
+ if (fout->remoteVersion < 140000 && strcmp("0", rsinfo->reltuples) == 0)
+ appendPQExpBufferStr(out, "\t'reltuples', '-1'::real,\n");
+ else
+ appendPQExpBuffer(out, "\t'reltuples', '%s'::real,\n", rsinfo->reltuples);
+
appendPQExpBuffer(out, "\t'relallvisible', '%d'::integer",
rsinfo->relallvisible);
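With that substitution, a pre-v14 relation that was never vacuumed or analyzed is dumped with reltuples = -1, matching the convention used by newer servers. An abbreviated, hypothetical fragment of the resulting dump output (the schema, relation name, and values here are made up; the call shape follows the test expectations later in this patch):

SELECT * FROM pg_catalog.pg_restore_relation_stats(
	'version', '180000'::integer,
	'schemaname', 'public',
	'relname', 'never_analyzed',
	'relpages', '0'::integer,
	'reltuples', '-1'::real,
	'relallvisible', '0'::integer);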
@@ -10978,7 +11016,7 @@ dumpRelationStats_dumper(Archive *fout, const void *userArg, const TocEntry *te)
appendStringLiteralAH(out, rsinfo->dobj.name, fout);
if (PQgetisnull(res, rownum, i_attname))
- pg_fatal("attname cannot be NULL");
+ pg_fatal("unexpected null attname");
attname = PQgetvalue(res, rownum, i_attname);
/*
@@ -17666,6 +17704,56 @@ dumpTableSchema(Archive *fout, const TableInfo *tbinfo)
if (tbinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
dumpTableSecLabel(fout, tbinfo, reltypename);
+ /*
+ * Dump comments for not-null constraints that aren't to be dumped
+ * separately (those are processed by collectComments/dumpComment).
+ */
+ if (!fout->dopt->no_comments && dopt->dumpSchema &&
+ fout->remoteVersion >= 180000)
+ {
+ PQExpBuffer comment = NULL;
+ PQExpBuffer tag = NULL;
+
+ for (j = 0; j < tbinfo->numatts; j++)
+ {
+ if (tbinfo->notnull_constrs[j] != NULL &&
+ tbinfo->notnull_comment[j] != NULL)
+ {
+ if (comment == NULL)
+ {
+ comment = createPQExpBuffer();
+ tag = createPQExpBuffer();
+ }
+ else
+ {
+ resetPQExpBuffer(comment);
+ resetPQExpBuffer(tag);
+ }
+
+ appendPQExpBuffer(comment, "COMMENT ON CONSTRAINT %s ON %s IS ",
+ fmtId(tbinfo->notnull_constrs[j]), qualrelname);
+ appendStringLiteralAH(comment, tbinfo->notnull_comment[j], fout);
+ appendPQExpBufferStr(comment, ";\n");
+
+ appendPQExpBuffer(tag, "CONSTRAINT %s ON %s",
+ fmtId(tbinfo->notnull_constrs[j]), qrelname);
+
+ ArchiveEntry(fout, nilCatalogId, createDumpId(),
+ ARCHIVE_OPTS(.tag = tag->data,
+ .namespace = tbinfo->dobj.namespace->dobj.name,
+ .owner = tbinfo->rolname,
+ .description = "COMMENT",
+ .section = SECTION_NONE,
+ .createStmt = comment->data,
+ .deps = &(tbinfo->dobj.dumpId),
+ .nDeps = 1));
+ }
+ }
+
+ destroyPQExpBuffer(comment);
+ destroyPQExpBuffer(tag);
+ }
+
/* Dump comments on inlined table constraints */
for (j = 0; j < tbinfo->ncheck; j++)
{
diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h
index 7417eab6aef..39eef1d6617 100644
--- a/src/bin/pg_dump/pg_dump.h
+++ b/src/bin/pg_dump/pg_dump.h
@@ -365,6 +365,7 @@ typedef struct _tableInfo
* there isn't one on this column. If
* empty string, unnamed constraint
* (pre-v17) */
+ char **notnull_comment; /* comment thereof */
bool *notnull_invalid; /* true for NOT NULL NOT VALID */
bool *notnull_noinh; /* NOT NULL is NO INHERIT */
bool *notnull_islocal; /* true if NOT NULL has local definition */
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 7f9c302b719..3cbcad65c5f 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -525,7 +525,7 @@ main(int argc, char *argv[])
OPF = fopen(global_path, PG_BINARY_W);
if (!OPF)
- pg_fatal("could not open \"%s\": %m", global_path);
+ pg_fatal("could not open file \"%s\": %m", global_path);
}
else if (filename)
{
@@ -699,7 +699,7 @@ main(int argc, char *argv[])
static void
help(void)
{
- printf(_("%s extracts a PostgreSQL database cluster based on specified dump format.\n\n"), progname);
+ printf(_("%s exports a PostgreSQL database cluster as an SQL script or to other formats.\n\n"), progname);
printf(_("Usage:\n"));
printf(_(" %s [OPTION]...\n"), progname);
@@ -1659,14 +1659,14 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat)
/* Create a subdirectory with 'databases' name under main directory. */
if (mkdir(db_subdir, pg_dir_create_mode) != 0)
- pg_fatal("could not create subdirectory \"%s\": %m", db_subdir);
+ pg_fatal("could not create directory \"%s\": %m", db_subdir);
snprintf(map_file_path, MAXPGPATH, "%s/map.dat", filename);
/* Create a map file (to store dboid and dbname) */
map_file = fopen(map_file_path, PG_BINARY_W);
if (!map_file)
- pg_fatal("could not open map file: %s", strerror(errno));
+ pg_fatal("could not open file \"%s\": %m", map_file_path);
}
for (i = 0; i < PQntuples(res); i++)
@@ -1976,7 +1976,7 @@ parseDumpFormat(const char *format)
else if (pg_strcasecmp(format, "tar") == 0)
archDumpFormat = archTar;
else
- pg_fatal("unrecognized archive format \"%s\"; please specify \"c\", \"d\", \"p\", or \"t\"",
+ pg_fatal("unrecognized output format \"%s\"; please specify \"c\", \"d\", \"p\", or \"t\"",
format);
return archDumpFormat;
diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c
index f2182e91825..6ef789cb06d 100644
--- a/src/bin/pg_dump/pg_restore.c
+++ b/src/bin/pg_dump/pg_restore.c
@@ -523,7 +523,7 @@ main(int argc, char **argv)
*/
if (!globals_only && opts->createDB != 1)
{
- pg_log_error("-C/--create option should be specified when restoring an archive created by pg_dumpall");
+ pg_log_error("option -C/--create must be specified when restoring an archive created by pg_dumpall");
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
pg_log_error_hint("Individual databases can be restored using their specific archives.");
exit_nicely(1);
@@ -557,7 +557,7 @@ main(int argc, char **argv)
if (conn)
PQfinish(conn);
- pg_log_info("database restoring skipped as -g/--globals-only option was specified");
+ pg_log_info("database restoring skipped because option -g/--globals-only was specified");
}
else
{
@@ -712,9 +712,9 @@ usage(const char *progname)
printf(_(" --use-set-session-authorization\n"
" use SET SESSION AUTHORIZATION commands instead of\n"
" ALTER OWNER commands to set ownership\n"));
- printf(_(" --with-data dump the data\n"));
- printf(_(" --with-schema dump the schema\n"));
- printf(_(" --with-statistics dump the statistics\n"));
+ printf(_(" --with-data restore the data\n"));
+ printf(_(" --with-schema restore the schema\n"));
+ printf(_(" --with-statistics restore the statistics\n"));
printf(_("\nConnection options:\n"));
printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
@@ -725,8 +725,8 @@ usage(const char *progname)
printf(_(" --role=ROLENAME do SET ROLE before restore\n"));
printf(_("\n"
- "The options -I, -n, -N, -P, -t, -T, --section, and --exclude-database can be combined\n"
- "and specified multiple times to select multiple objects.\n"));
+ "The options -I, -n, -N, -P, -t, -T, --section, and --exclude-database can be\n"
+ "combined and specified multiple times to select multiple objects.\n"));
printf(_("\nIf no input file name is supplied, then standard input is used.\n\n"));
printf(_("Report bugs to <%s>.\n"), PACKAGE_BUGREPORT);
printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_URL);
@@ -946,7 +946,7 @@ get_dbnames_list_to_restore(PGconn *conn,
query = createPQExpBuffer();
if (!conn)
- pg_log_info("considering PATTERN as NAME for --exclude-database option as no db connection while doing pg_restore.");
+ pg_log_info("considering PATTERN as NAME for --exclude-database option as no database connection while doing pg_restore");
/*
* Process one by one all dbnames and if specified to skip restoring, then
@@ -992,7 +992,7 @@ get_dbnames_list_to_restore(PGconn *conn,
if ((PQresultStatus(res) == PGRES_TUPLES_OK) && PQntuples(res))
{
skip_db_restore = true;
- pg_log_info("database \"%s\" matches exclude pattern: \"%s\"", dbidname->str, pat_cell->val);
+ pg_log_info("database name \"%s\" matches exclude pattern \"%s\"", dbidname->str, pat_cell->val);
}
PQclear(res);
@@ -1048,7 +1048,7 @@ get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oi
*/
if (!file_exists_in_directory(dumpdirpath, "map.dat"))
{
- pg_log_info("database restoring is skipped as \"map.dat\" is not present in \"%s\"", dumpdirpath);
+ pg_log_info("database restoring is skipped because file \"%s\" does not exist in directory \"%s\"", "map.dat", dumpdirpath);
return 0;
}
@@ -1058,7 +1058,7 @@ get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oi
pfile = fopen(map_file_path, PG_BINARY_R);
if (pfile == NULL)
- pg_fatal("could not open \"%s\": %m", map_file_path);
+ pg_fatal("could not open file \"%s\": %m", map_file_path);
initStringInfo(&linebuf);
@@ -1086,10 +1086,10 @@ get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oi
/* Report error and exit if the file has any corrupted data. */
if (!OidIsValid(db_oid) || namelen <= 1)
- pg_fatal("invalid entry in \"%s\" at line: %d", map_file_path,
+ pg_fatal("invalid entry in file \"%s\" on line %d", map_file_path,
count + 1);
- pg_log_info("found database \"%s\" (OID: %u) in \"%s\"",
+ pg_log_info("found database \"%s\" (OID: %u) in file \"%s\"",
dbname, db_oid, map_file_path);
dbidname = pg_malloc(offsetof(DbOidName, str) + namelen + 1);
@@ -1142,11 +1142,14 @@ restore_all_databases(PGconn *conn, const char *dumpdirpath,
if (dbname_oid_list.head == NULL)
return process_global_sql_commands(conn, dumpdirpath, opts->filename);
- pg_log_info("found %d database names in \"map.dat\"", num_total_db);
+ pg_log_info(ngettext("found %d database name in \"%s\"",
+ "found %d database names in \"%s\"",
+ num_total_db),
+ num_total_db, "map.dat");
if (!conn)
{
- pg_log_info("trying to connect database \"postgres\"");
+ pg_log_info("trying to connect to database \"%s\"", "postgres");
conn = ConnectDatabase("postgres", NULL, opts->cparams.pghost,
opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT,
@@ -1155,7 +1158,7 @@ restore_all_databases(PGconn *conn, const char *dumpdirpath,
/* Try with template1. */
if (!conn)
{
- pg_log_info("trying to connect database \"template1\"");
+ pg_log_info("trying to connect to database \"%s\"", "template1");
conn = ConnectDatabase("template1", NULL, opts->cparams.pghost,
opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT,
@@ -1179,7 +1182,9 @@ restore_all_databases(PGconn *conn, const char *dumpdirpath,
/* Exit if no db needs to be restored. */
if (dbname_oid_list.head == NULL || num_db_restore == 0)
{
- pg_log_info("no database needs to restore out of %d databases", num_total_db);
+ pg_log_info(ngettext("no database needs restoring out of %d database",
+ "no database needs restoring out of %d databases", num_total_db),
+ num_total_db);
return n_errors_total;
}
@@ -1314,7 +1319,7 @@ process_global_sql_commands(PGconn *conn, const char *dumpdirpath, const char *o
pfile = fopen(global_file_path, PG_BINARY_R);
if (pfile == NULL)
- pg_fatal("could not open \"%s\": %m", global_file_path);
+ pg_fatal("could not open file \"%s\": %m", global_file_path);
/*
* If outfile is given, then just copy all global.dat file data into
@@ -1354,15 +1359,17 @@ process_global_sql_commands(PGconn *conn, const char *dumpdirpath, const char *o
break;
default:
n_errors++;
- pg_log_error("could not execute query: \"%s\" \nCommand was: \"%s\"", PQerrorMessage(conn), sqlstatement.data);
+ pg_log_error("could not execute query: %s", PQerrorMessage(conn));
+ pg_log_error_detail("Command was: %s", sqlstatement.data);
}
PQclear(result);
}
/* Print a summary of ignored errors during global.dat. */
if (n_errors)
- pg_log_warning("ignored %d errors in \"%s\"", n_errors, global_file_path);
-
+ pg_log_warning(ngettext("ignored %d error in file \"%s\"",
+ "ignored %d errors in file \"%s\"", n_errors),
+ n_errors, global_file_path);
fclose(pfile);
return n_errors;
diff --git a/src/bin/pg_dump/t/001_basic.pl b/src/bin/pg_dump/t/001_basic.pl
index 84ca25e17d6..c3c5fae11ea 100644
--- a/src/bin/pg_dump/t/001_basic.pl
+++ b/src/bin/pg_dump/t/001_basic.pl
@@ -240,17 +240,20 @@ command_fails_like(
command_fails_like(
[ 'pg_restore', '--exclude-database=foo', '--globals-only', '-d', 'xxx' ],
qr/\Qpg_restore: error: option --exclude-database cannot be used together with -g\/--globals-only\E/,
- 'pg_restore: option --exclude-database cannot be used together with -g/--globals-only');
+ 'pg_restore: option --exclude-database cannot be used together with -g/--globals-only'
+);
command_fails_like(
[ 'pg_restore', '--exclude-database=foo', '-d', 'xxx', 'dumpdir' ],
qr/\Qpg_restore: error: option --exclude-database can be used only when restoring an archive created by pg_dumpall\E/,
- 'When option --exclude-database is used in pg_restore with dump of pg_dump');
+ 'When option --exclude-database is used in pg_restore with dump of pg_dump'
+);
command_fails_like(
[ 'pg_restore', '--globals-only', '-d', 'xxx', 'dumpdir' ],
qr/\Qpg_restore: error: option -g\/--globals-only can be used only when restoring an archive created by pg_dumpall\E/,
- 'When option --globals-only is not used in pg_restore with dump of pg_dump');
+ 'When option --globals-only is not used in pg_restore with dump of pg_dump'
+);
# also fails for -r and -t, but it seems pointless to add more tests for those.
command_fails_like(
@@ -261,6 +264,6 @@ command_fails_like(
command_fails_like(
[ 'pg_dumpall', '--format', 'x' ],
- qr/\Qpg_dumpall: error: unrecognized archive format "x";\E/,
- 'pg_dumpall: unrecognized archive format');
+ qr/\Qpg_dumpall: error: unrecognized output format "x";\E/,
+ 'pg_dumpall: unrecognized output format');
done_testing();
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index 55d892d9c16..2485d8f360e 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -68,6 +68,7 @@ my %pgdump_runs = (
'--no-data',
'--sequence-data',
'--binary-upgrade',
+ '--with-statistics',
'--dbname' => 'postgres', # alternative way to specify database
],
restore_cmd => [
@@ -75,6 +76,7 @@ my %pgdump_runs = (
'--format' => 'custom',
'--verbose',
'--file' => "$tempdir/binary_upgrade.sql",
+ '--with-statistics',
"$tempdir/binary_upgrade.dump",
],
},
@@ -88,11 +90,13 @@ my %pgdump_runs = (
'--format' => 'custom',
'--compress' => '1',
'--file' => "$tempdir/compression_gzip_custom.dump",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/compression_gzip_custom.sql",
+ '--with-statistics',
"$tempdir/compression_gzip_custom.dump",
],
command_like => {
@@ -115,6 +119,7 @@ my %pgdump_runs = (
'--format' => 'directory',
'--compress' => 'gzip:1',
'--file' => "$tempdir/compression_gzip_dir",
+ '--with-statistics',
'postgres',
],
# Give coverage for manually compressed blobs.toc files during
@@ -132,6 +137,7 @@ my %pgdump_runs = (
'pg_restore',
'--jobs' => '2',
'--file' => "$tempdir/compression_gzip_dir.sql",
+ '--with-statistics',
"$tempdir/compression_gzip_dir",
],
},
@@ -144,6 +150,7 @@ my %pgdump_runs = (
'--format' => 'plain',
'--compress' => '1',
'--file' => "$tempdir/compression_gzip_plain.sql.gz",
+ '--with-statistics',
'postgres',
],
# Decompress the generated file to run through the tests.
@@ -162,11 +169,13 @@ my %pgdump_runs = (
'--format' => 'custom',
'--compress' => 'lz4',
'--file' => "$tempdir/compression_lz4_custom.dump",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/compression_lz4_custom.sql",
+ '--with-statistics',
"$tempdir/compression_lz4_custom.dump",
],
command_like => {
@@ -189,6 +198,7 @@ my %pgdump_runs = (
'--format' => 'directory',
'--compress' => 'lz4:1',
'--file' => "$tempdir/compression_lz4_dir",
+ '--with-statistics',
'postgres',
],
# Verify that data files were compressed
@@ -200,6 +210,7 @@ my %pgdump_runs = (
'pg_restore',
'--jobs' => '2',
'--file' => "$tempdir/compression_lz4_dir.sql",
+ '--with-statistics',
"$tempdir/compression_lz4_dir",
],
},
@@ -212,6 +223,7 @@ my %pgdump_runs = (
'--format' => 'plain',
'--compress' => 'lz4',
'--file' => "$tempdir/compression_lz4_plain.sql.lz4",
+ '--with-statistics',
'postgres',
],
# Decompress the generated file to run through the tests.
@@ -233,11 +245,13 @@ my %pgdump_runs = (
'--format' => 'custom',
'--compress' => 'zstd',
'--file' => "$tempdir/compression_zstd_custom.dump",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/compression_zstd_custom.sql",
+ '--with-statistics',
"$tempdir/compression_zstd_custom.dump",
],
command_like => {
@@ -259,6 +273,7 @@ my %pgdump_runs = (
'--format' => 'directory',
'--compress' => 'zstd:1',
'--file' => "$tempdir/compression_zstd_dir",
+ '--with-statistics',
'postgres',
],
# Give coverage for manually compressed blobs.toc files during
@@ -279,6 +294,7 @@ my %pgdump_runs = (
'pg_restore',
'--jobs' => '2',
'--file' => "$tempdir/compression_zstd_dir.sql",
+ '--with-statistics',
"$tempdir/compression_zstd_dir",
],
},
@@ -292,6 +308,7 @@ my %pgdump_runs = (
'--format' => 'plain',
'--compress' => 'zstd:long',
'--file' => "$tempdir/compression_zstd_plain.sql.zst",
+ '--with-statistics',
'postgres',
],
# Decompress the generated file to run through the tests.
@@ -310,6 +327,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/clean.sql",
'--clean',
+ '--with-statistics',
'--dbname' => 'postgres', # alternative way to specify database
],
},
@@ -320,6 +338,7 @@ my %pgdump_runs = (
'--clean',
'--if-exists',
'--encoding' => 'UTF8', # no-op, just for testing
+ '--with-statistics',
'postgres',
],
},
@@ -338,6 +357,7 @@ my %pgdump_runs = (
'--create',
'--no-reconnect', # no-op, just for testing
'--verbose',
+ '--with-statistics',
'postgres',
],
},
@@ -348,7 +368,7 @@ my %pgdump_runs = (
'--data-only',
'--superuser' => 'test_superuser',
'--disable-triggers',
- '--verbose', # no-op, just make sure it works
+ '--verbose', # no-op, just make sure it works
'postgres',
],
},
@@ -356,6 +376,7 @@ my %pgdump_runs = (
dump_cmd => [
'pg_dump', '--no-sync',
'--file' => "$tempdir/defaults.sql",
+ '--with-statistics',
'postgres',
],
},
@@ -364,6 +385,7 @@ my %pgdump_runs = (
dump_cmd => [
'pg_dump', '--no-sync',
'--file' => "$tempdir/defaults_no_public.sql",
+ '--with-statistics',
'regress_pg_dump_test',
],
},
@@ -373,6 +395,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--clean',
'--file' => "$tempdir/defaults_no_public_clean.sql",
+ '--with-statistics',
'regress_pg_dump_test',
],
},
@@ -381,6 +404,7 @@ my %pgdump_runs = (
dump_cmd => [
'pg_dump', '--no-sync',
'--file' => "$tempdir/defaults_public_owner.sql",
+ '--with-statistics',
'regress_public_owner',
],
},
@@ -395,12 +419,14 @@ my %pgdump_runs = (
'pg_dump',
'--format' => 'custom',
'--file' => "$tempdir/defaults_custom_format.dump",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--format' => 'custom',
'--file' => "$tempdir/defaults_custom_format.sql",
+ '--with-statistics',
"$tempdir/defaults_custom_format.dump",
],
command_like => {
@@ -425,12 +451,14 @@ my %pgdump_runs = (
'pg_dump',
'--format' => 'directory',
'--file' => "$tempdir/defaults_dir_format",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--format' => 'directory',
'--file' => "$tempdir/defaults_dir_format.sql",
+ '--with-statistics',
"$tempdir/defaults_dir_format",
],
command_like => {
@@ -456,11 +484,13 @@ my %pgdump_runs = (
'--format' => 'directory',
'--jobs' => 2,
'--file' => "$tempdir/defaults_parallel",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/defaults_parallel.sql",
+ '--with-statistics',
"$tempdir/defaults_parallel",
],
},
@@ -472,12 +502,14 @@ my %pgdump_runs = (
'pg_dump',
'--format' => 'tar',
'--file' => "$tempdir/defaults_tar_format.tar",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--format' => 'tar',
'--file' => "$tempdir/defaults_tar_format.sql",
+ '--with-statistics',
"$tempdir/defaults_tar_format.tar",
],
},
@@ -486,6 +518,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/exclude_dump_test_schema.sql",
'--exclude-schema' => 'dump_test',
+ '--with-statistics',
'postgres',
],
},
@@ -494,6 +527,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/exclude_test_table.sql",
'--exclude-table' => 'dump_test.test_table',
+ '--with-statistics',
'postgres',
],
},
@@ -502,6 +536,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/exclude_measurement.sql",
'--exclude-table-and-children' => 'dump_test.measurement',
+ '--with-statistics',
'postgres',
],
},
@@ -511,6 +546,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/exclude_measurement_data.sql",
'--exclude-table-data-and-children' => 'dump_test.measurement',
'--no-unlogged-table-data',
+ '--with-statistics',
'postgres',
],
},
@@ -520,6 +556,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/exclude_test_table_data.sql",
'--exclude-table-data' => 'dump_test.test_table',
'--no-unlogged-table-data',
+ '--with-statistics',
'postgres',
],
},
@@ -538,6 +575,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/pg_dumpall_globals.sql",
'--globals-only',
'--no-sync',
+ '--with-statistics',
],
},
pg_dumpall_globals_clean => {
@@ -547,12 +585,14 @@ my %pgdump_runs = (
'--globals-only',
'--clean',
'--no-sync',
+ '--with-statistics',
],
},
pg_dumpall_dbprivs => {
dump_cmd => [
'pg_dumpall', '--no-sync',
'--file' => "$tempdir/pg_dumpall_dbprivs.sql",
+ '--with-statistics',
],
},
pg_dumpall_exclude => {
@@ -562,6 +602,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/pg_dumpall_exclude.sql",
'--exclude-database' => '*dump_test*',
'--no-sync',
+ '--with-statistics',
],
},
no_toast_compression => {
@@ -569,6 +610,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_toast_compression.sql",
'--no-toast-compression',
+ '--with-statistics',
'postgres',
],
},
@@ -577,6 +619,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_large_objects.sql",
'--no-large-objects',
+ '--with-statistics',
'postgres',
],
},
@@ -585,6 +628,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_policies.sql",
'--no-policies',
+ '--with-statistics',
'postgres',
],
},
@@ -593,6 +637,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_privs.sql",
'--no-privileges',
+ '--with-statistics',
'postgres',
],
},
@@ -601,6 +646,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_owner.sql",
'--no-owner',
+ '--with-statistics',
'postgres',
],
},
@@ -609,6 +655,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_table_access_method.sql",
'--no-table-access-method',
+ '--with-statistics',
'postgres',
],
},
@@ -617,6 +664,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/only_dump_test_schema.sql",
'--schema' => 'dump_test',
+ '--with-statistics',
'postgres',
],
},
@@ -627,6 +675,7 @@ my %pgdump_runs = (
'--table' => 'dump_test.test_table',
'--lock-wait-timeout' =>
(1000 * $PostgreSQL::Test::Utils::timeout_default),
+ '--with-statistics',
'postgres',
],
},
@@ -637,6 +686,7 @@ my %pgdump_runs = (
'--table-and-children' => 'dump_test.measurement',
'--lock-wait-timeout' =>
(1000 * $PostgreSQL::Test::Utils::timeout_default),
+ '--with-statistics',
'postgres',
],
},
@@ -646,6 +696,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/role.sql",
'--role' => 'regress_dump_test_role',
'--schema' => 'dump_test_second_schema',
+ '--with-statistics',
'postgres',
],
},
@@ -658,11 +709,13 @@ my %pgdump_runs = (
'--file' => "$tempdir/role_parallel",
'--role' => 'regress_dump_test_role',
'--schema' => 'dump_test_second_schema',
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/role_parallel.sql",
+ '--with-statistics',
"$tempdir/role_parallel",
],
},
@@ -691,6 +744,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/section_pre_data.sql",
'--section' => 'pre-data',
+ '--with-statistics',
'postgres',
],
},
@@ -699,6 +753,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/section_data.sql",
'--section' => 'data',
+ '--with-statistics',
'postgres',
],
},
@@ -707,6 +762,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/section_post_data.sql",
'--section' => 'post-data',
+ '--with-statistics',
'postgres',
],
},
@@ -717,6 +773,7 @@ my %pgdump_runs = (
'--schema' => 'dump_test',
'--large-objects',
'--no-large-objects',
+ '--with-statistics',
'postgres',
],
},
@@ -732,6 +789,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
"--file=$tempdir/no_data_no_schema.sql", '--no-data',
'--no-schema', 'postgres',
+ '--with-statistics',
],
},
statistics_only => {
@@ -752,7 +810,7 @@ my %pgdump_runs = (
dump_cmd => [
'pg_dump', '--no-sync',
"--file=$tempdir/no_schema.sql", '--no-schema',
- 'postgres',
+ '--with-statistics', 'postgres',
],
},);
@@ -1132,7 +1190,9 @@ my %tests = (
) INHERITS (dump_test.test_table_nn, dump_test.test_table_nn_2);
ALTER TABLE dump_test.test_table_nn ADD CONSTRAINT nn NOT NULL col1 NOT VALID;
ALTER TABLE dump_test.test_table_nn_chld1 VALIDATE CONSTRAINT nn;
- ALTER TABLE dump_test.test_table_nn_chld2 VALIDATE CONSTRAINT nn;',
+ ALTER TABLE dump_test.test_table_nn_chld2 VALIDATE CONSTRAINT nn;
+ COMMENT ON CONSTRAINT nn ON dump_test.test_table_nn IS \'nn comment is valid\';
+ COMMENT ON CONSTRAINT nn ON dump_test.test_table_nn_chld2 IS \'nn_chld2 comment is valid\';',
regexp => qr/^
\QALTER TABLE dump_test.test_table_nn\E \n^\s+
\QADD CONSTRAINT nn NOT NULL col1 NOT VALID;\E
@@ -1146,6 +1206,34 @@ my %tests = (
},
},
+ # This constraint is invalid, therefore it goes in SECTION_POST_DATA
+ 'COMMENT ON CONSTRAINT ON test_table_nn' => {
+ regexp => qr/^
+ \QCOMMENT ON CONSTRAINT nn ON dump_test.test_table_nn IS\E
+ /xm,
+ like => {
+ %full_runs, %dump_test_schema_runs, section_post_data => 1,
+ },
+ unlike => {
+ exclude_dump_test_schema => 1,
+ only_dump_measurement => 1,
+ },
+ },
+
+ # This constraint is valid, therefore it goes in SECTION_PRE_DATA
+ 'COMMENT ON CONSTRAINT ON test_table_chld2' => {
+ regexp => qr/^
+ \QCOMMENT ON CONSTRAINT nn ON dump_test.test_table_nn_chld2 IS\E
+ /xm,
+ like => {
+ %full_runs, %dump_test_schema_runs, section_pre_data => 1,
+ },
+ unlike => {
+ exclude_dump_test_schema => 1,
+ only_dump_measurement => 1,
+ },
+ },
+
'CONSTRAINT NOT NULL / NOT VALID (child1)' => {
regexp => qr/^
\QCREATE TABLE dump_test.test_table_nn_chld1 (\E\n
@@ -4834,13 +4922,13 @@ my %tests = (
CREATE TABLE dump_test.has_stats
AS SELECT g.g AS x, g.g / 2 AS y FROM generate_series(1,100) AS g(g);
CREATE MATERIALIZED VIEW dump_test.has_stats_mv AS SELECT * FROM dump_test.has_stats;
- CREATE INDEX dup_test_post_data_ix ON dump_test.has_stats(x, (x - 1));
+ CREATE INDEX """dump_test""\'s post-data index" ON dump_test.has_stats(x, (x - 1));
ANALYZE dump_test.has_stats, dump_test.has_stats_mv;',
regexp => qr/^
\QSELECT * FROM pg_catalog.pg_restore_relation_stats(\E\s+
'version',\s'\d+'::integer,\s+
'schemaname',\s'dump_test',\s+
- 'relname',\s'dup_test_post_data_ix',\s+
+ 'relname',\s'"dump_test"''s\ post-data\ index',\s+
'relpages',\s'\d+'::integer,\s+
'reltuples',\s'\d+'::real,\s+
'relallvisible',\s'\d+'::integer,\s+
@@ -4849,7 +4937,7 @@ my %tests = (
\QSELECT * FROM pg_catalog.pg_restore_attribute_stats(\E\s+
'version',\s'\d+'::integer,\s+
'schemaname',\s'dump_test',\s+
- 'relname',\s'dup_test_post_data_ix',\s+
+ 'relname',\s'"dump_test"''s\ post-data\ index',\s+
'attnum',\s'2'::smallint,\s+
'inherited',\s'f'::boolean,\s+
'null_frac',\s'0'::real,\s+
diff --git a/src/bin/pg_dump/t/006_pg_dumpall.pl b/src/bin/pg_dump/t/006_pg_dumpall.pl
index 5acd49f1559..c274b777586 100644
--- a/src/bin/pg_dump/t/006_pg_dumpall.pl
+++ b/src/bin/pg_dump/t/006_pg_dumpall.pl
@@ -294,17 +294,17 @@ my %pgdumpall_runs = (
'--format' => 'directory',
'--globals-only',
'--file' => "$tempdir/dump_globals_only",
- ],
- restore_cmd => [
- 'pg_restore', '-C', '--globals-only',
- '--format' => 'directory',
- '--file' => "$tempdir/dump_globals_only.sql",
- "$tempdir/dump_globals_only",
- ],
- like => qr/
+ ],
+ restore_cmd => [
+ 'pg_restore', '-C', '--globals-only',
+ '--format' => 'directory',
+ '--file' => "$tempdir/dump_globals_only.sql",
+ "$tempdir/dump_globals_only",
+ ],
+ like => qr/
^\s*\QCREATE ROLE dumpall;\E\s*\n
/xm
- }, );
+ },);
# First execute the setup_sql
foreach my $run (sort keys %pgdumpall_runs)
@@ -339,7 +339,8 @@ foreach my $run (sort keys %pgdumpall_runs)
# pg_restore --file output file.
my $output_file = slurp_file("$tempdir/${run}.sql");
- if (!($pgdumpall_runs{$run}->{like}) && !($pgdumpall_runs{$run}->{unlike}))
+ if ( !($pgdumpall_runs{$run}->{like})
+ && !($pgdumpall_runs{$run}->{unlike}))
{
die "missing \"like\" or \"unlike\" in test \"$run\"";
}
@@ -361,30 +362,38 @@ foreach my $run (sort keys %pgdumpall_runs)
# Some negative test case with dump of pg_dumpall and restore using pg_restore
# test case 1: when -C is not used in pg_restore with dump of pg_dumpall
$node->command_fails_like(
- [ 'pg_restore',
- "$tempdir/format_custom",
- '--format' => 'custom',
- '--file' => "$tempdir/error_test.sql", ],
- qr/\Qpg_restore: error: -C\/--create option should be specified when restoring an archive created by pg_dumpall\E/,
- 'When -C is not used in pg_restore with dump of pg_dumpall');
+ [
+ 'pg_restore',
+ "$tempdir/format_custom",
+ '--format' => 'custom',
+ '--file' => "$tempdir/error_test.sql",
+ ],
+ qr/\Qpg_restore: error: option -C\/--create must be specified when restoring an archive created by pg_dumpall\E/,
+ 'When -C is not used in pg_restore with dump of pg_dumpall');
# test case 2: When --list option is used with dump of pg_dumpall
$node->command_fails_like(
- [ 'pg_restore',
+ [
+ 'pg_restore',
"$tempdir/format_custom", '-C',
- '--format' => 'custom', '--list',
- '--file' => "$tempdir/error_test.sql", ],
+ '--format' => 'custom',
+ '--list',
+ '--file' => "$tempdir/error_test.sql",
+ ],
qr/\Qpg_restore: error: option -l\/--list cannot be used when restoring an archive created by pg_dumpall\E/,
'When --list is used in pg_restore with dump of pg_dumpall');
# test case 3: When non-exist database is given with -d option
$node->command_fails_like(
- [ 'pg_restore',
+ [
+ 'pg_restore',
"$tempdir/format_custom", '-C',
'--format' => 'custom',
- '-d' => 'dbpq', ],
+ '-d' => 'dbpq',
+ ],
qr/\Qpg_restore: error: could not connect to database "dbpq"\E/,
- 'When non-existent database is given with -d option in pg_restore with dump of pg_dumpall');
+ 'When non-existent database is given with -d option in pg_restore with dump of pg_dumpall'
+);
$node->stop('fast');