aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAndrew Dunstan <andrew@dunslane.net>2025-04-04 10:05:38 -0400
committerAndrew Dunstan <andrew@dunslane.net>2025-04-04 16:01:22 -0400
commit1495eff7bdb0779cc54ca04f3bd768f647240df2 (patch)
treee4160823fd79737bf1d527da8eadf6f3f82570c6
parent2b69afbe50d5e39cc7d9703b3ab7acc4495a54ea (diff)
downloadpostgresql-1495eff7bdb0779cc54ca04f3bd768f647240df2.tar.gz
postgresql-1495eff7bdb0779cc54ca04f3bd768f647240df2.zip
Non text modes for pg_dumpall, correspondingly change pg_restore
pg_dumpall acquires a new -F/--format option, with the same meanings as pg_dump. The default is p, meaning plain text. For any other value, a directory is created containing two files, global.dat and map.dat. The first contains SQL for restoring the global data, and the second contains a map from oids to database names. It will also contain a subdirectory called databases, inside which it will create archives in the specified format, named using the database oids. In these cases the -f argument is required. If pg_restore encounters a directory containing global.dat, and no toc.dat, it restores the global settings and then restores each database. pg_restore acquires two new options: -g/--globals-only which suppresses restoration of any databases, and --exclude-database which inhibits restoration of particular database(s) in the same way the same option works in pg_dumpall. Author: Mahendra Singh Thalor <mahi6run@gmail.com> Co-authored-by: Andrew Dunstan <andrew@dunslane.net> Reviewed-by: jian he <jian.universality@gmail.com> Reviewed-by: Srinath Reddy <srinath2133@gmail.com> Reviewed-by: Álvaro Herrera <alvherre@alvh.no-ip.org> Discussion: https://postgr.es/m/cb103623-8ee6-4ba5-a2c9-f32e3a4933fa@dunslane.net
-rw-r--r--doc/src/sgml/ref/pg_dumpall.sgml86
-rw-r--r--doc/src/sgml/ref/pg_restore.sgml66
-rw-r--r--src/bin/pg_dump/parallel.c10
-rw-r--r--src/bin/pg_dump/pg_backup.h2
-rw-r--r--src/bin/pg_dump/pg_backup_archiver.c20
-rw-r--r--src/bin/pg_dump/pg_backup_archiver.h1
-rw-r--r--src/bin/pg_dump/pg_backup_tar.c2
-rw-r--r--src/bin/pg_dump/pg_dump.c2
-rw-r--r--src/bin/pg_dump/pg_dumpall.c294
-rw-r--r--src/bin/pg_dump/pg_restore.c794
-rw-r--r--src/bin/pg_dump/t/001_basic.pl9
11 files changed, 1201 insertions, 85 deletions
diff --git a/doc/src/sgml/ref/pg_dumpall.sgml b/doc/src/sgml/ref/pg_dumpall.sgml
index 765b30a3a66..43fdab2d77e 100644
--- a/doc/src/sgml/ref/pg_dumpall.sgml
+++ b/doc/src/sgml/ref/pg_dumpall.sgml
@@ -16,7 +16,7 @@ PostgreSQL documentation
<refnamediv>
<refname>pg_dumpall</refname>
- <refpurpose>extract a <productname>PostgreSQL</productname> database cluster into a script file</refpurpose>
+ <refpurpose>extract a <productname>PostgreSQL</productname> database cluster using a specified dump format</refpurpose>
</refnamediv>
<refsynopsisdiv>
@@ -33,7 +33,7 @@ PostgreSQL documentation
<para>
<application>pg_dumpall</application> is a utility for writing out
(<quote>dumping</quote>) all <productname>PostgreSQL</productname> databases
- of a cluster into one script file. The script file contains
+ of a cluster into an archive. The archive contains
<acronym>SQL</acronym> commands that can be used as input to <xref
linkend="app-psql"/> to restore the databases. It does this by
calling <xref linkend="app-pgdump"/> for each database in the cluster.
@@ -52,12 +52,17 @@ PostgreSQL documentation
</para>
<para>
- The SQL script will be written to the standard output. Use the
+ Plain text SQL scripts will be written to the standard output. Use the
<option>-f</option>/<option>--file</option> option or shell operators to
redirect it into a file.
</para>
<para>
+ Archives in other formats will be placed in a directory named using the
+ <option>-f</option>/<option>--file</option> option, which is required in this case.
+ </para>
+
+ <para>
<application>pg_dumpall</application> needs to connect several
times to the <productname>PostgreSQL</productname> server (once per
database). If you use password authentication it will ask for
@@ -121,11 +126,86 @@ PostgreSQL documentation
<para>
Send output to the specified file. If this is omitted, the
standard output is used.
+ Note: This option can only be omitted when <option>--format</option> is plain.
</para>
</listitem>
</varlistentry>
<varlistentry>
+ <term><option>-F <replaceable class="parameter">format</replaceable></option></term>
+ <term><option>--format=<replaceable class="parameter">format</replaceable></option></term>
+ <listitem>
+ <para>
+ Specify the format of dump files. In plain format, all the dump data is
+ sent in a single text stream. This is the default.
+
+ In all other modes, <application>pg_dumpall</application> first creates two files:
+ <filename>global.dat</filename> and <filename>map.dat</filename>, in the directory
+ specified by <option>--file</option>.
+ The first file contains global data, such as roles and tablespaces. The second
+ contains a mapping between database oids and names. These files are used by
+ <application>pg_restore</application>. Data for individual databases is placed in
+ the <filename>databases</filename> subdirectory, named using the database's <type>oid</type>.
+
+ <variablelist>
+ <varlistentry>
+ <term><literal>d</literal></term>
+ <term><literal>directory</literal></term>
+ <listitem>
+ <para>
+ Output directory-format archives for each database,
+ suitable for input into pg_restore. The directory
+ will have database <type>oid</type> as its name.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><literal>p</literal></term>
+ <term><literal>plain</literal></term>
+ <listitem>
+ <para>
+ Output a plain-text SQL script file (the default).
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><literal>c</literal></term>
+ <term><literal>custom</literal></term>
+ <listitem>
+ <para>
+ Output a custom-format archive for each database,
+ suitable for input into pg_restore. The archive
+ will be named <filename>dboid.dmp</filename> where <type>dboid</type> is the
+ <type>oid</type> of the database.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><literal>t</literal></term>
+ <term><literal>tar</literal></term>
+ <listitem>
+ <para>
+ Output a tar-format archive for each database,
+ suitable for input into pg_restore. The archive
+ will be named <filename>dboid.tar</filename> where <type>dboid</type> is the
+ <type>oid</type> of the database.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ Note: see <xref linkend="app-pgdump"/> for details
+ of how the various non-plain-text archives work.
+
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
<term><option>--filter=<replaceable class="parameter">filename</replaceable></option></term>
<listitem>
<para>
diff --git a/doc/src/sgml/ref/pg_restore.sgml b/doc/src/sgml/ref/pg_restore.sgml
index c840a807ae9..f14e5866f6c 100644
--- a/doc/src/sgml/ref/pg_restore.sgml
+++ b/doc/src/sgml/ref/pg_restore.sgml
@@ -18,8 +18,9 @@ PostgreSQL documentation
<refname>pg_restore</refname>
<refpurpose>
- restore a <productname>PostgreSQL</productname> database from an
- archive file created by <application>pg_dump</application>
+ restore a <productname>PostgreSQL</productname> database or cluster
+ from an archive created by <application>pg_dump</application> or
+ <application>pg_dumpall</application>
</refpurpose>
</refnamediv>
@@ -38,13 +39,14 @@ PostgreSQL documentation
<para>
<application>pg_restore</application> is a utility for restoring a
- <productname>PostgreSQL</productname> database from an archive
- created by <xref linkend="app-pgdump"/> in one of the non-plain-text
+ <productname>PostgreSQL</productname> database or cluster from an archive
+ created by <xref linkend="app-pgdump"/> or
+ <xref linkend="app-pg-dumpall"/> in one of the non-plain-text
formats. It will issue the commands necessary to reconstruct the
- database to the state it was in at the time it was saved. The
- archive files also allow <application>pg_restore</application> to
+ database or cluster to the state it was in at the time it was saved. The
+ archives also allow <application>pg_restore</application> to
be selective about what is restored, or even to reorder the items
- prior to being restored. The archive files are designed to be
+ prior to being restored. The archive formats are designed to be
portable across architectures.
</para>
@@ -52,10 +54,17 @@ PostgreSQL documentation
<application>pg_restore</application> can operate in two modes.
If a database name is specified, <application>pg_restore</application>
connects to that database and restores archive contents directly into
- the database. Otherwise, a script containing the SQL
- commands necessary to rebuild the database is created and written
+ the database.
+ When restoring from a dump made by <application>pg_dumpall</application>,
+ each database will be created and then the restoration will be run in that
+ database.
+
+ Otherwise, when a database name is not specified, a script containing the SQL
+ commands necessary to rebuild the database or cluster is created and written
to a file or standard output. This script output is equivalent to
- the plain text output format of <application>pg_dump</application>.
+ the plain text output format of <application>pg_dump</application> or
+ <application>pg_dumpall</application>.
+
Some of the options controlling the output are therefore analogous to
<application>pg_dump</application> options.
</para>
@@ -140,6 +149,8 @@ PostgreSQL documentation
commands that mention this database.
Access privileges for the database itself are also restored,
unless <option>--no-acl</option> is specified.
+ <option>--create</option> is required when restoring multiple databases
+ from an archive created by <application>pg_dumpall</application>.
</para>
<para>
@@ -167,6 +178,28 @@ PostgreSQL documentation
</varlistentry>
<varlistentry>
+ <term><option>--exclude-database=<replaceable class="parameter">pattern</replaceable></option></term>
+ <listitem>
+ <para>
+ Do not restore databases whose name matches
+ <replaceable class="parameter">pattern</replaceable>.
+ Multiple patterns can be excluded by writing multiple
+ <option>--exclude-database</option> switches. The
+ <replaceable class="parameter">pattern</replaceable> parameter is
+ interpreted as a pattern according to the same rules used by
+ <application>psql</application>'s <literal>\d</literal>
+ commands (see <xref linkend="app-psql-patterns"/>),
+ so multiple databases can also be excluded by writing wildcard
+ characters in the pattern. When using wildcards, be careful to
+ quote the pattern if needed to prevent shell wildcard expansion.
+ </para>
+ <para>
+ This option is only relevant when restoring from an archive made using <application>pg_dumpall</application>.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
<term><option>-e</option></term>
<term><option>--exit-on-error</option></term>
<listitem>
@@ -316,6 +349,19 @@ PostgreSQL documentation
</varlistentry>
<varlistentry>
+ <term><option>-g</option></term>
+ <term><option>--globals-only</option></term>
+ <listitem>
+ <para>
+ Restore only global objects (roles and tablespaces), no databases.
+ </para>
+ <para>
+ This option is only relevant when restoring from an archive made using <application>pg_dumpall</application>.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
<term><option>-I <replaceable class="parameter">index</replaceable></option></term>
<term><option>--index=<replaceable class="parameter">index</replaceable></option></term>
<listitem>
diff --git a/src/bin/pg_dump/parallel.c b/src/bin/pg_dump/parallel.c
index 086adcdc502..5974d6706fd 100644
--- a/src/bin/pg_dump/parallel.c
+++ b/src/bin/pg_dump/parallel.c
@@ -334,6 +334,16 @@ on_exit_close_archive(Archive *AHX)
}
/*
+ * When pg_restore restores multiple databases, then update already added entry
+ * into array for cleanup.
+ */
+void
+replace_on_exit_close_archive(Archive *AHX)
+{
+ shutdown_info.AHX = AHX;
+}
+
+/*
* on_exit_nicely handler for shutting down database connections and
* worker processes cleanly.
*/
diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h
index 453ff83b321..5c8c1b3ea0a 100644
--- a/src/bin/pg_dump/pg_backup.h
+++ b/src/bin/pg_dump/pg_backup.h
@@ -315,7 +315,7 @@ extern void SetArchiveOptions(Archive *AH, DumpOptions *dopt, RestoreOptions *ro
extern void ProcessArchiveRestoreOptions(Archive *AHX);
-extern void RestoreArchive(Archive *AHX);
+extern void RestoreArchive(Archive *AHX, bool append_data);
/* Open an existing archive */
extern Archive *OpenArchive(const char *FileSpec, const ArchiveFormat fmt);
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 7f6d4ed94e1..f961162f365 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -85,7 +85,7 @@ static int RestoringToDB(ArchiveHandle *AH);
static void dump_lo_buf(ArchiveHandle *AH);
static void dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim);
static void SetOutput(ArchiveHandle *AH, const char *filename,
- const pg_compress_specification compression_spec);
+ const pg_compress_specification compression_spec, bool append_data);
static CompressFileHandle *SaveOutput(ArchiveHandle *AH);
static void RestoreOutput(ArchiveHandle *AH, CompressFileHandle *savedOutput);
@@ -337,9 +337,14 @@ ProcessArchiveRestoreOptions(Archive *AHX)
StrictNamesCheck(ropt);
}
-/* Public */
+/*
+ * RestoreArchive
+ *
+ * If append_data is set, then append data into file as we are restoring dump
+ * of multiple databases which was taken by pg_dumpall.
+ */
void
-RestoreArchive(Archive *AHX)
+RestoreArchive(Archive *AHX, bool append_data)
{
ArchiveHandle *AH = (ArchiveHandle *) AHX;
RestoreOptions *ropt = AH->public.ropt;
@@ -456,7 +461,7 @@ RestoreArchive(Archive *AHX)
*/
sav = SaveOutput(AH);
if (ropt->filename || ropt->compression_spec.algorithm != PG_COMPRESSION_NONE)
- SetOutput(AH, ropt->filename, ropt->compression_spec);
+ SetOutput(AH, ropt->filename, ropt->compression_spec, append_data);
ahprintf(AH, "--\n-- PostgreSQL database dump\n--\n\n");
@@ -1295,7 +1300,7 @@ PrintTOCSummary(Archive *AHX)
sav = SaveOutput(AH);
if (ropt->filename)
- SetOutput(AH, ropt->filename, out_compression_spec);
+ SetOutput(AH, ropt->filename, out_compression_spec, false);
if (strftime(stamp_str, sizeof(stamp_str), PGDUMP_STRFTIME_FMT,
localtime(&AH->createDate)) == 0)
@@ -1674,7 +1679,8 @@ archprintf(Archive *AH, const char *fmt,...)
static void
SetOutput(ArchiveHandle *AH, const char *filename,
- const pg_compress_specification compression_spec)
+ const pg_compress_specification compression_spec,
+ bool append_data)
{
CompressFileHandle *CFH;
const char *mode;
@@ -1694,7 +1700,7 @@ SetOutput(ArchiveHandle *AH, const char *filename,
else
fn = fileno(stdout);
- if (AH->mode == archModeAppend)
+ if (append_data || AH->mode == archModeAppend)
mode = PG_BINARY_A;
else
mode = PG_BINARY_W;
diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h
index b7ebc2b39cd..859fbd322d1 100644
--- a/src/bin/pg_dump/pg_backup_archiver.h
+++ b/src/bin/pg_dump/pg_backup_archiver.h
@@ -390,6 +390,7 @@ struct _tocEntry
extern int parallel_restore(ArchiveHandle *AH, TocEntry *te);
extern void on_exit_close_archive(Archive *AHX);
+extern void replace_on_exit_close_archive(Archive *AHX);
extern void warn_or_exit_horribly(ArchiveHandle *AH, const char *fmt,...) pg_attribute_printf(2, 3);
diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c
index b5ba3b46dd9..d94d0de2a5d 100644
--- a/src/bin/pg_dump/pg_backup_tar.c
+++ b/src/bin/pg_dump/pg_backup_tar.c
@@ -826,7 +826,7 @@ _CloseArchive(ArchiveHandle *AH)
savVerbose = AH->public.verbose;
AH->public.verbose = 0;
- RestoreArchive((Archive *) AH);
+ RestoreArchive((Archive *) AH, false);
SetArchiveOptions((Archive *) AH, savDopt, savRopt);
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 784c067e8c6..0e915432e77 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -1222,7 +1222,7 @@ main(int argc, char **argv)
* right now.
*/
if (plainText)
- RestoreArchive(fout);
+ RestoreArchive(fout, false);
CloseArchive(fout);
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 573a8b61a45..bbcac81a8fe 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -15,6 +15,7 @@
#include "postgres_fe.h"
+#include <sys/stat.h>
#include <time.h>
#include <unistd.h>
@@ -64,9 +65,10 @@ static void dropTablespaces(PGconn *conn);
static void dumpTablespaces(PGconn *conn);
static void dropDBs(PGconn *conn);
static void dumpUserConfig(PGconn *conn, const char *username);
-static void dumpDatabases(PGconn *conn);
+static void dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat);
static void dumpTimestamp(const char *msg);
-static int runPgDump(const char *dbname, const char *create_opts);
+static int runPgDump(const char *dbname, const char *create_opts,
+ char *dbfile, ArchiveFormat archDumpFormat);
static void buildShSecLabels(PGconn *conn,
const char *catalog_name, Oid objectId,
const char *objtype, const char *objname,
@@ -75,6 +77,8 @@ static void executeCommand(PGconn *conn, const char *query);
static void expand_dbname_patterns(PGconn *conn, SimpleStringList *patterns,
SimpleStringList *names);
static void read_dumpall_filters(const char *filename, SimpleStringList *pattern);
+static void create_or_open_dir(const char *dirname);
+static ArchiveFormat parseDumpFormat(const char *format);
static char pg_dump_bin[MAXPGPATH];
static PQExpBuffer pgdumpopts;
@@ -146,6 +150,7 @@ main(int argc, char *argv[])
{"password", no_argument, NULL, 'W'},
{"no-privileges", no_argument, NULL, 'x'},
{"no-acl", no_argument, NULL, 'x'},
+ {"format", required_argument, NULL, 'F'},
/*
* the following options don't have an equivalent short option letter
@@ -195,6 +200,8 @@ main(int argc, char *argv[])
char *pgdb = NULL;
char *use_role = NULL;
const char *dumpencoding = NULL;
+ ArchiveFormat archDumpFormat = archNull;
+ const char *formatName = "p";
trivalue prompt_password = TRI_DEFAULT;
bool data_only = false;
bool globals_only = false;
@@ -244,7 +251,7 @@ main(int argc, char *argv[])
pgdumpopts = createPQExpBuffer();
- while ((c = getopt_long(argc, argv, "acd:E:f:gh:l:Op:rsS:tU:vwWx", long_options, &optindex)) != -1)
+ while ((c = getopt_long(argc, argv, "acd:E:f:F:gh:l:Op:rsS:tU:vwWx", long_options, &optindex)) != -1)
{
switch (c)
{
@@ -272,7 +279,9 @@ main(int argc, char *argv[])
appendPQExpBufferStr(pgdumpopts, " -f ");
appendShellString(pgdumpopts, filename);
break;
-
+ case 'F':
+ formatName = pg_strdup(optarg);
+ break;
case 'g':
globals_only = true;
break;
@@ -421,6 +430,21 @@ main(int argc, char *argv[])
exit_nicely(1);
}
+ /* Get format for dump. */
+ archDumpFormat = parseDumpFormat(formatName);
+
+ /*
+ * If a non-plain format is specified, a file name is also required as the
+ * path to the main directory.
+ */
+ if (archDumpFormat != archNull &&
+ (!filename || strcmp(filename, "") == 0))
+ {
+ pg_log_error("option -F/--format=d|c|t requires option -f/--file");
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
+ exit_nicely(1);
+ }
+
/*
* If password values are not required in the dump, switch to using
* pg_roles which is equally useful, just more likely to have unrestricted
@@ -484,6 +508,33 @@ main(int argc, char *argv[])
appendPQExpBufferStr(pgdumpopts, " --statistics-only");
/*
+ * Open the output file if required, otherwise use stdout. If required,
+ * then create new directory and global.dat file.
+ */
+ if (archDumpFormat != archNull)
+ {
+ char global_path[MAXPGPATH];
+
+ /* Create new directory or accept the empty existing directory. */
+ create_or_open_dir(filename);
+
+ snprintf(global_path, MAXPGPATH, "%s/global.dat", filename);
+
+ OPF = fopen(global_path, PG_BINARY_W);
+ if (!OPF)
+ pg_fatal("could not open global.dat file: %s", strerror(errno));
+ }
+ else if (filename)
+ {
+ OPF = fopen(filename, PG_BINARY_W);
+ if (!OPF)
+ pg_fatal("could not open output file \"%s\": %m",
+ filename);
+ }
+ else
+ OPF = stdout;
+
+ /*
* If there was a database specified on the command line, use that,
* otherwise try to connect to database "postgres", and failing that
* "template1".
@@ -523,19 +574,6 @@ main(int argc, char *argv[])
&database_exclude_names);
/*
- * Open the output file if required, otherwise use stdout
- */
- if (filename)
- {
- OPF = fopen(filename, PG_BINARY_W);
- if (!OPF)
- pg_fatal("could not open output file \"%s\": %m",
- filename);
- }
- else
- OPF = stdout;
-
- /*
* Set the client encoding if requested.
*/
if (dumpencoding)
@@ -634,7 +672,7 @@ main(int argc, char *argv[])
}
if (!globals_only && !roles_only && !tablespaces_only)
- dumpDatabases(conn);
+ dumpDatabases(conn, archDumpFormat);
PQfinish(conn);
@@ -647,7 +685,7 @@ main(int argc, char *argv[])
fclose(OPF);
/* sync the resulting file, errors are not fatal */
- if (dosync)
+ if (dosync && (archDumpFormat == archNull))
(void) fsync_fname(filename, false);
}
@@ -658,12 +696,14 @@ main(int argc, char *argv[])
static void
help(void)
{
- printf(_("%s extracts a PostgreSQL database cluster into an SQL script file.\n\n"), progname);
+ printf(_("%s extracts a PostgreSQL database cluster based on specified dump format.\n\n"), progname);
printf(_("Usage:\n"));
printf(_(" %s [OPTION]...\n"), progname);
printf(_("\nGeneral options:\n"));
printf(_(" -f, --file=FILENAME output file name\n"));
+ printf(_(" -F, --format=c|d|t|p output file format (custom, directory, tar,\n"
+ " plain text (default))\n"));
printf(_(" -v, --verbose verbose mode\n"));
printf(_(" -V, --version output version information, then exit\n"));
printf(_(" --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n"));
@@ -969,9 +1009,6 @@ dumpRoles(PGconn *conn)
* We do it this way because config settings for roles could mention the
* names of other roles.
*/
- if (PQntuples(res) > 0)
- fprintf(OPF, "\n--\n-- User Configurations\n--\n");
-
for (i = 0; i < PQntuples(res); i++)
dumpUserConfig(conn, PQgetvalue(res, i, i_rolname));
@@ -1485,6 +1522,7 @@ dumpUserConfig(PGconn *conn, const char *username)
{
PQExpBuffer buf = createPQExpBuffer();
PGresult *res;
+ static bool header_done = false;
printfPQExpBuffer(buf, "SELECT unnest(setconfig) FROM pg_db_role_setting "
"WHERE setdatabase = 0 AND setrole = "
@@ -1496,7 +1534,13 @@ dumpUserConfig(PGconn *conn, const char *username)
res = executeQuery(conn, buf->data);
if (PQntuples(res) > 0)
+ {
+ if (!header_done)
+ fprintf(OPF, "\n--\n-- User Configurations\n--\n");
+ header_done = true;
+
fprintf(OPF, "\n--\n-- User Config \"%s\"\n--\n\n", username);
+ }
for (int i = 0; i < PQntuples(res); i++)
{
@@ -1570,10 +1614,13 @@ expand_dbname_patterns(PGconn *conn,
* Dump contents of databases.
*/
static void
-dumpDatabases(PGconn *conn)
+dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat)
{
PGresult *res;
int i;
+ char db_subdir[MAXPGPATH];
+ char dbfilepath[MAXPGPATH];
+ FILE *map_file = NULL;
/*
* Skip databases marked not datallowconn, since we'd be unable to connect
@@ -1587,18 +1634,42 @@ dumpDatabases(PGconn *conn)
* doesn't have some failure mode with --clean.
*/
res = executeQuery(conn,
- "SELECT datname "
+ "SELECT datname, oid "
"FROM pg_database d "
"WHERE datallowconn AND datconnlimit != -2 "
"ORDER BY (datname <> 'template1'), datname");
- if (PQntuples(res) > 0)
+ if (archDumpFormat == archNull && PQntuples(res) > 0)
fprintf(OPF, "--\n-- Databases\n--\n\n");
+ /*
+ * If directory/tar/custom format is specified, create a subdirectory
+ * under the main directory and each database dump file or subdirectory
+ * will be created in that subdirectory by pg_dump.
+ */
+ if (archDumpFormat != archNull)
+ {
+ char map_file_path[MAXPGPATH];
+
+ snprintf(db_subdir, MAXPGPATH, "%s/databases", filename);
+
+ /* Create a subdirectory with 'databases' name under main directory. */
+ if (mkdir(db_subdir, 0755) != 0)
+ pg_fatal("could not create subdirectory \"%s\": %m", db_subdir);
+
+ snprintf(map_file_path, MAXPGPATH, "%s/map.dat", filename);
+
+ /* Create a map file (to store dboid and dbname) */
+ map_file = fopen(map_file_path, PG_BINARY_W);
+ if (!map_file)
+ pg_fatal("could not open map file: %s", strerror(errno));
+ }
+
for (i = 0; i < PQntuples(res); i++)
{
char *dbname = PQgetvalue(res, i, 0);
- const char *create_opts;
+ char *oid = PQgetvalue(res, i, 1);
+ const char *create_opts = "";
int ret;
/* Skip template0, even if it's not marked !datallowconn. */
@@ -1612,9 +1683,27 @@ dumpDatabases(PGconn *conn)
continue;
}
+ /*
+ * If this is not a plain format dump, then append dboid and dbname to
+ * the map.dat file.
+ */
+ if (archDumpFormat != archNull)
+ {
+ if (archDumpFormat == archCustom)
+ snprintf(dbfilepath, MAXPGPATH, "\"%s\"/\"%s\".dmp", db_subdir, oid);
+ else if (archDumpFormat == archTar)
+ snprintf(dbfilepath, MAXPGPATH, "\"%s\"/\"%s\".tar", db_subdir, oid);
+ else
+ snprintf(dbfilepath, MAXPGPATH, "\"%s\"/\"%s\"", db_subdir, oid);
+
+ /* Put one line entry for dboid and dbname in map file. */
+ fprintf(map_file, "%s %s\n", oid, dbname);
+ }
+
pg_log_info("dumping database \"%s\"", dbname);
- fprintf(OPF, "--\n-- Database \"%s\" dump\n--\n\n", dbname);
+ if (archDumpFormat == archNull)
+ fprintf(OPF, "--\n-- Database \"%s\" dump\n--\n\n", dbname);
/*
* We assume that "template1" and "postgres" already exist in the
@@ -1628,12 +1717,9 @@ dumpDatabases(PGconn *conn)
{
if (output_clean)
create_opts = "--clean --create";
- else
- {
- create_opts = "";
- /* Since pg_dump won't emit a \connect command, we must */
+ /* Since pg_dump won't emit a \connect command, we must */
+ else if (archDumpFormat == archNull)
fprintf(OPF, "\\connect %s\n\n", dbname);
- }
}
else
create_opts = "--create";
@@ -1641,19 +1727,30 @@ dumpDatabases(PGconn *conn)
if (filename)
fclose(OPF);
- ret = runPgDump(dbname, create_opts);
+ ret = runPgDump(dbname, create_opts, dbfilepath, archDumpFormat);
if (ret != 0)
pg_fatal("pg_dump failed on database \"%s\", exiting", dbname);
if (filename)
{
- OPF = fopen(filename, PG_BINARY_A);
+ char global_path[MAXPGPATH];
+
+ if (archDumpFormat != archNull)
+ snprintf(global_path, MAXPGPATH, "%s/global.dat", filename);
+ else
+ snprintf(global_path, MAXPGPATH, "%s", filename);
+
+ OPF = fopen(global_path, PG_BINARY_A);
if (!OPF)
pg_fatal("could not re-open the output file \"%s\": %m",
- filename);
+ global_path);
}
}
+ /* Close map file */
+ if (archDumpFormat != archNull)
+ fclose(map_file);
+
PQclear(res);
}
@@ -1663,7 +1760,8 @@ dumpDatabases(PGconn *conn)
* Run pg_dump on dbname, with specified options.
*/
static int
-runPgDump(const char *dbname, const char *create_opts)
+runPgDump(const char *dbname, const char *create_opts, char *dbfile,
+ ArchiveFormat archDumpFormat)
{
PQExpBufferData connstrbuf;
PQExpBufferData cmd;
@@ -1672,17 +1770,36 @@ runPgDump(const char *dbname, const char *create_opts)
initPQExpBuffer(&connstrbuf);
initPQExpBuffer(&cmd);
- printfPQExpBuffer(&cmd, "\"%s\" %s %s", pg_dump_bin,
- pgdumpopts->data, create_opts);
-
/*
- * If we have a filename, use the undocumented plain-append pg_dump
- * format.
+ * If this is not a plain format dump, then append file name and dump
+ * format to the pg_dump command to get archive dump.
*/
- if (filename)
- appendPQExpBufferStr(&cmd, " -Fa ");
+ if (archDumpFormat != archNull)
+ {
+ printfPQExpBuffer(&cmd, "\"%s\" -f %s %s", pg_dump_bin,
+ dbfile, create_opts);
+
+ if (archDumpFormat == archDirectory)
+ appendPQExpBufferStr(&cmd, " --format=directory ");
+ else if (archDumpFormat == archCustom)
+ appendPQExpBufferStr(&cmd, " --format=custom ");
+ else if (archDumpFormat == archTar)
+ appendPQExpBufferStr(&cmd, " --format=tar ");
+ }
else
- appendPQExpBufferStr(&cmd, " -Fp ");
+ {
+ printfPQExpBuffer(&cmd, "\"%s\" %s %s", pg_dump_bin,
+ pgdumpopts->data, create_opts);
+
+ /*
+ * If we have a filename, use the undocumented plain-append pg_dump
+ * format.
+ */
+ if (filename)
+ appendPQExpBufferStr(&cmd, " -Fa ");
+ else
+ appendPQExpBufferStr(&cmd, " -Fp ");
+ }
/*
* Append the database name to the already-constructed stem of connection
@@ -1827,3 +1944,90 @@ read_dumpall_filters(const char *filename, SimpleStringList *pattern)
filter_free(&fstate);
}
+
+/*
+ * create_or_open_dir
+ *
+ * This will create a new directory with the given dirname. If there is
+ * already an empty directory with that name, then use it.
+ */
+static void
+create_or_open_dir(const char *dirname)
+{
+ struct stat st;
+ bool is_empty = false;
+
+ /* we accept an empty existing directory */
+ if (stat(dirname, &st) == 0 && S_ISDIR(st.st_mode))
+ {
+ DIR *dir = opendir(dirname);
+
+ if (dir)
+ {
+ struct dirent *d;
+
+ is_empty = true;
+
+ while (errno = 0, (d = readdir(dir)))
+ {
+ if (strcmp(d->d_name, ".") != 0 && strcmp(d->d_name, "..") != 0)
+ {
+ is_empty = false;
+ break;
+ }
+ }
+
+ if (errno)
+ pg_fatal("could not read directory \"%s\": %m",
+ dirname);
+
+ if (closedir(dir))
+ pg_fatal("could not close directory \"%s\": %m",
+ dirname);
+ }
+
+ if (!is_empty)
+ {
+ pg_log_error("directory \"%s\" exists but is not empty", dirname);
+ pg_log_error_hint("Either remove the directory "
+ "\"%s\" or its contents.",
+ dirname);
+ exit_nicely(1);
+ }
+ }
+ else if (mkdir(dirname, 0700) < 0)
+ pg_fatal("could not create directory \"%s\": %m", dirname);
+}
+
+/*
+ * parseDumpFormat
+ *
+ * This will validate dump formats.
+ */
+static ArchiveFormat
+parseDumpFormat(const char *format)
+{
+ ArchiveFormat archDumpFormat;
+
+ if (pg_strcasecmp(format, "c") == 0)
+ archDumpFormat = archCustom;
+ else if (pg_strcasecmp(format, "custom") == 0)
+ archDumpFormat = archCustom;
+ else if (pg_strcasecmp(format, "d") == 0)
+ archDumpFormat = archDirectory;
+ else if (pg_strcasecmp(format, "directory") == 0)
+ archDumpFormat = archDirectory;
+ else if (pg_strcasecmp(format, "p") == 0)
+ archDumpFormat = archNull;
+ else if (pg_strcasecmp(format, "plain") == 0)
+ archDumpFormat = archNull;
+ else if (pg_strcasecmp(format, "t") == 0)
+ archDumpFormat = archTar;
+ else if (pg_strcasecmp(format, "tar") == 0)
+ archDumpFormat = archTar;
+ else
+ pg_fatal("unrecognized archive format \"%s\"; please specify \"c\", \"d\", \"p\", or \"t\"",
+ format);
+
+ return archDumpFormat;
+}
diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c
index 47f7b0dd3a1..ce0c05e4b73 100644
--- a/src/bin/pg_dump/pg_restore.c
+++ b/src/bin/pg_dump/pg_restore.c
@@ -2,7 +2,7 @@
*
* pg_restore.c
* pg_restore is an utility extracting postgres database definitions
- * from a backup archive created by pg_dump using the archiver
+ * from a backup archive created by pg_dump/pg_dumpall using the archiver
* interface.
*
* pg_restore will read the backup archive and
@@ -41,11 +41,15 @@
#include "postgres_fe.h"
#include <ctype.h>
+#include <sys/stat.h>
#ifdef HAVE_TERMIOS_H
#include <termios.h>
#endif
+#include "common/string.h"
+#include "connectdb.h"
#include "fe_utils/option_utils.h"
+#include "fe_utils/string_utils.h"
#include "filter.h"
#include "getopt_long.h"
#include "parallel.h"
@@ -53,18 +57,35 @@
static void usage(const char *progname);
static void read_restore_filters(const char *filename, RestoreOptions *opts);
+static bool file_exists_in_directory(const char *dir, const char *filename);
+static int restore_one_database(const char *inputFileSpec, RestoreOptions *opts,
+ int numWorkers, bool append_data, int num);
+static int read_one_statement(StringInfo inBuf, FILE *pfile);
+static int restore_all_databases(PGconn *conn, const char *dumpdirpath,
+ SimpleStringList db_exclude_patterns, RestoreOptions *opts, int numWorkers);
+static int process_global_sql_commands(PGconn *conn, const char *dumpdirpath,
+ const char *outfile);
+static void copy_or_print_global_file(const char *outfile, FILE *pfile);
+static int get_dbnames_list_to_restore(PGconn *conn,
+ SimpleOidStringList *dbname_oid_list,
+ SimpleStringList db_exclude_patterns);
+static int get_dbname_oid_list_from_mfile(const char *dumpdirpath,
+ SimpleOidStringList *dbname_oid_list);
+static size_t quote_literal_internal(char *dst, const char *src, size_t len);
+static char *quote_literal_cstr(const char *rawstr);
int
main(int argc, char **argv)
{
RestoreOptions *opts;
int c;
- int exit_code;
int numWorkers = 1;
- Archive *AH;
char *inputFileSpec;
bool data_only = false;
bool schema_only = false;
+ int n_errors = 0;
+ bool globals_only = false;
+ SimpleStringList db_exclude_patterns = {NULL, NULL};
static int disable_triggers = 0;
static int enable_row_security = 0;
static int if_exists = 0;
@@ -90,6 +111,7 @@ main(int argc, char **argv)
{"clean", 0, NULL, 'c'},
{"create", 0, NULL, 'C'},
{"data-only", 0, NULL, 'a'},
+ {"globals-only", 0, NULL, 'g'},
{"dbname", 1, NULL, 'd'},
{"exit-on-error", 0, NULL, 'e'},
{"exclude-schema", 1, NULL, 'N'},
@@ -144,6 +166,7 @@ main(int argc, char **argv)
{"with-statistics", no_argument, &with_statistics, 1},
{"statistics-only", no_argument, &statistics_only, 1},
{"filter", required_argument, NULL, 4},
+ {"exclude-database", required_argument, NULL, 6},
{NULL, 0, NULL, 0}
};
@@ -172,7 +195,7 @@ main(int argc, char **argv)
}
}
- while ((c = getopt_long(argc, argv, "acCd:ef:F:h:I:j:lL:n:N:Op:P:RsS:t:T:U:vwWx1",
+ while ((c = getopt_long(argc, argv, "acCd:ef:F:gh:I:j:lL:n:N:Op:P:RsS:t:T:U:vwWx1",
cmdopts, NULL)) != -1)
{
switch (c)
@@ -199,11 +222,14 @@ main(int argc, char **argv)
if (strlen(optarg) != 0)
opts->formatName = pg_strdup(optarg);
break;
+ case 'g':
+ /* restore only global.dat file from directory */
+ globals_only = true;
+ break;
case 'h':
if (strlen(optarg) != 0)
opts->cparams.pghost = pg_strdup(optarg);
break;
-
case 'j': /* number of restore jobs */
if (!option_parse_int(optarg, "-j/--jobs", 1,
PG_MAX_JOBS,
@@ -318,6 +344,9 @@ main(int argc, char **argv)
exit(1);
opts->exit_on_error = true;
break;
+ case 6: /* database patterns to skip */
+ simple_string_list_append(&db_exclude_patterns, optarg);
+ break;
default:
/* getopt_long already emitted a complaint */
@@ -345,6 +374,13 @@ main(int argc, char **argv)
if (!opts->cparams.dbname && !opts->filename && !opts->tocSummary)
pg_fatal("one of -d/--dbname and -f/--file must be specified");
+ if (db_exclude_patterns.head != NULL && globals_only)
+ {
+ pg_log_error("option --exclude-database cannot be used together with -g/--globals-only");
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
+ exit_nicely(1);
+ }
+
/* Should get at most one of -d and -f, else user is confused */
if (opts->cparams.dbname)
{
@@ -452,6 +488,114 @@ main(int argc, char **argv)
opts->formatName);
}
+ /*
+ * If toc.dat file is not present in the current path, then check for
+ * global.dat. If global.dat file is present, then restore all the
+ * databases from map.dat (if it exists), but skip restoring those
+ * matching --exclude-database patterns.
+ */
+ if (inputFileSpec != NULL && !file_exists_in_directory(inputFileSpec, "toc.dat") &&
+ file_exists_in_directory(inputFileSpec, "global.dat"))
+ {
+ PGconn *conn = NULL; /* Connection to restore global sql
+ * commands. */
+
+ /*
+ * Can only use --list or --use-list options with a single database
+ * dump.
+ */
+ if (opts->tocSummary)
+ pg_fatal("option -l/--list cannot be used when restoring an archive created with pg_dumpall");
+ else if (opts->tocFile)
+ pg_fatal("option -L/--use-list cannot be used when restoring an archive created with pg_dumpall");
+
+ /*
+ * To restore from a pg_dumpall archive, -C (create database) option
+ * must be specified unless we are only restoring globals.
+ */
+ if (!globals_only && opts->createDB != 1)
+ {
+ pg_log_error("-C/--create option should be specified when restoring from an archive created by pg_dumpall");
+ pg_log_error_hint("Try \"%s --help\" for more information.", progname);
+ pg_log_error_hint("Individual databases can be restored using their specific archives.");
+ exit_nicely(1);
+ }
+
+ /*
+ * Connect to the database to execute global sql commands from
+ * global.dat file.
+ */
+ if (opts->cparams.dbname)
+ {
+ conn = ConnectDatabase(opts->cparams.dbname, NULL, opts->cparams.pghost,
+ opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT,
+ false, progname, NULL, NULL, NULL, NULL);
+
+
+ if (!conn)
+ pg_fatal("could not connect to database \"%s\"", opts->cparams.dbname);
+ }
+
+ /* If globals-only, then return from here. */
+ if (globals_only)
+ {
+ /*
+ * Open global.dat file and execute/append all the global sql
+ * commands.
+ */
+ n_errors = process_global_sql_commands(conn, inputFileSpec,
+ opts->filename);
+
+ if (conn)
+ PQfinish(conn);
+
+ pg_log_info("database restoring skipped as -g/--globals-only option was specified");
+ }
+ else
+ {
+ /* Now restore all the databases from map.dat */
+ n_errors = restore_all_databases(conn, inputFileSpec, db_exclude_patterns,
+ opts, numWorkers);
+ }
+
+ /* Free db pattern list. */
+ simple_string_list_destroy(&db_exclude_patterns);
+ }
+ else /* process if global.dat file does not exist. */
+ {
+ if (db_exclude_patterns.head != NULL)
+ pg_fatal("option --exclude-database can be used only when restoring an archive created by pg_dumpall");
+
+ if (globals_only)
+ pg_fatal("option -g/--globals-only can be used only when restoring an archive created by pg_dumpall");
+
+ n_errors = restore_one_database(inputFileSpec, opts, numWorkers, false, 0);
+ }
+
+ /* Done, print a summary of ignored errors during restore. */
+ if (n_errors)
+ {
+ pg_log_warning("errors ignored on restore: %d", n_errors);
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * restore_one_database
+ *
+ * This will restore one database using toc.dat file.
+ *
+ * returns the number of errors while doing restore.
+ */
+static int
+restore_one_database(const char *inputFileSpec, RestoreOptions *opts,
+ int numWorkers, bool append_data, int num)
+{
+ Archive *AH;
+ int n_errors;
+
AH = OpenArchive(inputFileSpec, opts->format);
SetArchiveOptions(AH, NULL, opts);
@@ -459,9 +603,15 @@ main(int argc, char **argv)
/*
* We don't have a connection yet but that doesn't matter. The connection
* is initialized to NULL and if we terminate through exit_nicely() while
- * it's still NULL, the cleanup function will just be a no-op.
+ * it's still NULL, the cleanup function will just be a no-op. If we are
+ * restoring multiple databases, then only update AX handle for cleanup as
+ * the previous entry was already in the array and we had closed previous
+ * connection, so we can use the same array slot.
*/
- on_exit_close_archive(AH);
+ if (!append_data || num == 0)
+ on_exit_close_archive(AH);
+ else
+ replace_on_exit_close_archive(AH);
/* Let the archiver know how noisy to be */
AH->verbose = opts->verbose;
@@ -481,25 +631,22 @@ main(int argc, char **argv)
else
{
ProcessArchiveRestoreOptions(AH);
- RestoreArchive(AH);
+ RestoreArchive(AH, append_data);
}
- /* done, print a summary of ignored errors */
- if (AH->n_errors)
- pg_log_warning("errors ignored on restore: %d", AH->n_errors);
+ n_errors = AH->n_errors;
/* AH may be freed in CloseArchive? */
- exit_code = AH->n_errors ? 1 : 0;
-
CloseArchive(AH);
- return exit_code;
+ return n_errors;
}
static void
usage(const char *progname)
{
- printf(_("%s restores a PostgreSQL database from an archive created by pg_dump.\n\n"), progname);
+ printf(_("%s restores a PostgreSQL database from an archive created by pg_dump or pg_dumpall.\n"
+ "If the archive was created by pg_dumpall, multiple databases can also be restored.\n\n"), progname);
printf(_("Usage:\n"));
printf(_(" %s [OPTION]... [FILE]\n"), progname);
@@ -517,6 +664,7 @@ usage(const char *progname)
printf(_(" -c, --clean clean (drop) database objects before recreating\n"));
printf(_(" -C, --create create the target database\n"));
printf(_(" -e, --exit-on-error exit on error, default is to continue\n"));
+ printf(_(" -g, --globals-only restore only global objects, no databases\n"));
printf(_(" -I, --index=NAME restore named index\n"));
printf(_(" -j, --jobs=NUM use this many parallel jobs to restore\n"));
printf(_(" -L, --use-list=FILENAME use table of contents from this file for\n"
@@ -529,6 +677,7 @@ usage(const char *progname)
printf(_(" -S, --superuser=NAME superuser user name to use for disabling triggers\n"));
printf(_(" -t, --table=NAME restore named relation (table, view, etc.)\n"));
printf(_(" -T, --trigger=NAME restore named trigger\n"));
+ printf(_(" --exclude-database=PATTERN exclude databases whose name matches with pattern\n"));
printf(_(" -x, --no-privileges skip restoration of access privileges (grant/revoke)\n"));
printf(_(" -1, --single-transaction restore as a single transaction\n"));
printf(_(" --disable-triggers disable triggers during data-only restore\n"));
@@ -569,8 +718,8 @@ usage(const char *progname)
printf(_(" --role=ROLENAME do SET ROLE before restore\n"));
printf(_("\n"
- "The options -I, -n, -N, -P, -t, -T, and --section can be combined and specified\n"
- "multiple times to select multiple objects.\n"));
+ "The options -I, -n, -N, -P, -t, -T, --section, and --exclude-database can be combined\n"
+ "and specified multiple times to select multiple objects.\n"));
printf(_("\nIf no input file name is supplied, then standard input is used.\n\n"));
printf(_("Report bugs to <%s>.\n"), PACKAGE_BUGREPORT);
printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_URL);
@@ -675,3 +824,614 @@ read_restore_filters(const char *filename, RestoreOptions *opts)
filter_free(&fstate);
}
+
+/*
+ * file_exists_in_directory
+ *
+ * Check whether "filename" exists inside directory "dir" and is a regular
+ * file (directories, sockets, etc. do not count).
+ */
+static bool
+file_exists_in_directory(const char *dir, const char *filename)
+{
+	char		path[MAXPGPATH];
+	struct stat st;
+
+	/* Build "dir/filename", refusing to proceed on truncation. */
+	if (snprintf(path, MAXPGPATH, "%s/%s", dir, filename) >= MAXPGPATH)
+		pg_fatal("directory name too long: \"%s\"", dir);
+
+	return (stat(path, &st) == 0 && S_ISREG(st.st_mode));
+}
+
+/*
+ * read_one_statement
+ *
+ * Read one SQL statement from "pfile" into "inBuf" using fgetc(), stopping
+ * at the first semicolon that is outside single or double quotes (the
+ * statement terminator used in global.dat).
+ *
+ * Returns EOF if end-of-file is seen before any input was collected;
+ * otherwise returns 'Q' (an arbitrary non-EOF value) and inBuf holds the
+ * statement text.
+ */
+
+static int
+read_one_statement(StringInfo inBuf, FILE *pfile)
+{
+	int			c;				/* character read from getc() */
+	int			m;				/* quote character currently being matched */
+
+	StringInfoData q;			/* scratch buffer for one quoted string */
+
+	initStringInfo(&q);
+
+	resetStringInfo(inBuf);
+
+	/*
+	 * Read characters until EOF or the appropriate delimiter is seen.
+	 */
+	while ((c = fgetc(pfile)) != EOF)
+	{
+		/* Fast path: copy runs of ordinary characters straight into inBuf. */
+		if (c != '\'' && c != '"' && c != '\n' && c != ';')
+		{
+			appendStringInfoChar(inBuf, (char) c);
+			while ((c = fgetc(pfile)) != EOF)
+			{
+				if (c != '\'' && c != '"' && c != ';' && c != '\n')
+					appendStringInfoChar(inBuf, (char) c);
+				else
+					break;
+			}
+		}
+
+		/*
+		 * Quoted string: buffer everything up to the matching close quote in
+		 * "q", then flush it into inBuf wholesale, so that semicolons and
+		 * newlines inside quotes do not terminate the statement.
+		 *
+		 * NOTE(review): a doubled quote ('' or "") is seen as two adjacent
+		 * strings, which round-trips correctly, but backslash escapes get no
+		 * special handling -- assumes global.dat emits only simple quoting;
+		 * TODO confirm against pg_dumpall's output.
+		 */
+		if (c == '\'' || c == '"')
+		{
+			appendStringInfoChar(&q, (char) c);
+			m = c;
+
+			while ((c = fgetc(pfile)) != EOF)
+			{
+				appendStringInfoChar(&q, (char) c);
+
+				if (c == m)
+				{
+					appendStringInfoString(inBuf, q.data);
+					resetStringInfo(&q);
+					break;
+				}
+			}
+		}
+
+		/* An unquoted semicolon terminates the statement. */
+		if (c == ';')
+		{
+			appendStringInfoChar(inBuf, (char) ';');
+			break;
+		}
+
+		/* Keep newlines, so multi-line statements stay readable in logs. */
+		if (c == '\n')
+			appendStringInfoChar(inBuf, (char) '\n');
+	}
+
+	/* No input before EOF signal means time to quit. */
+	if (c == EOF && inBuf->len == 0)
+		return EOF;
+
+	/* return something that's not EOF */
+	return 'Q';
+}
+
+/*
+ * get_dbnames_list_to_restore
+ *
+ * Mark for skipping (by setting oid to InvalidOid) any entries of
+ * dbname_oid_list that match an entry in the db_exclude_patterns list.
+ *
+ * With a connection, patterns are evaluated server-side with
+ * processSQLNamePattern(); without one, each pattern is compared as a
+ * literal name (case-insensitively).
+ *
+ * Returns the number of databases left to be restored.
+ */
+static int
+get_dbnames_list_to_restore(PGconn *conn,
+							SimpleOidStringList *dbname_oid_list,
+							SimpleStringList db_exclude_patterns)
+{
+	int			count_db = 0;
+	PQExpBuffer query;
+	PGresult   *res;
+
+	query = createPQExpBuffer();
+
+	if (!conn)
+		pg_log_info("considering PATTERN as NAME for --exclude-database option as no db connection while doing pg_restore.");
+
+	/*
+	 * Process one by one all dbnames and if specified to skip restoring, then
+	 * remove dbname from list.
+	 */
+	for (SimpleOidStringListCell *db_cell = dbname_oid_list->head;
+		 db_cell; db_cell = db_cell->next)
+	{
+		bool		skip_db_restore = false;
+
+		for (SimpleStringListCell *pat_cell = db_exclude_patterns.head; pat_cell; pat_cell = pat_cell->next)
+		{
+			/*
+			 * If no db connection, then consider PATTERN as NAME.
+			 */
+			if (pg_strcasecmp(db_cell->str, pat_cell->val) == 0)
+				skip_db_restore = true;
+			else if (conn)
+			{
+				int			dotcnt;
+				char	   *quoted_dbname = quote_literal_cstr(db_cell->str);
+
+				/*
+				 * Construct the pattern matching query: SELECT 1 WHERE XXX
+				 * OPERATOR(pg_catalog.~) '^(PATTERN)$' COLLATE
+				 * pg_catalog.default, where XXX is the string-literal
+				 * database name from dbname_oid_list (originally read from
+				 * map.dat); that's why we need quote_literal_cstr.
+				 */
+				appendPQExpBufferStr(query, "SELECT 1 ");
+				processSQLNamePattern(conn, query, pat_cell->val, false,
+									  false, NULL, quoted_dbname,
+									  NULL, NULL, NULL, &dotcnt);
+
+				/* was leaked once per pattern per database before */
+				free(quoted_dbname);
+
+				if (dotcnt > 0)
+				{
+					pg_log_error("improper qualified name (too many dotted names): %s",
+								 db_cell->str);
+					PQfinish(conn);
+					exit_nicely(1);
+				}
+
+				res = executeQuery(conn, query->data);
+
+				if ((PQresultStatus(res) == PGRES_TUPLES_OK) && PQntuples(res))
+				{
+					skip_db_restore = true;
+					pg_log_info("database \"%s\" matches exclude pattern: \"%s\"", db_cell->str, pat_cell->val);
+				}
+
+				PQclear(res);
+				resetPQExpBuffer(query);
+			}
+
+			if (skip_db_restore)
+				break;
+		}
+
+		/* Increment count if database needs to be restored. */
+		if (skip_db_restore)
+		{
+			pg_log_info("excluding database \"%s\"", db_cell->str);
+			db_cell->oid = InvalidOid;
+		}
+		else
+		{
+			count_db++;
+		}
+	}
+
+	/* was leaked before: the query buffer must be destroyed, not just reset */
+	destroyPQExpBuffer(query);
+
+	return count_db;
+}
+
+/*
+ * get_dbname_oid_list_from_mfile
+ *
+ * Open map.dat and read it line by line, building a list of database names
+ * and their corresponding OIDs.  Each line has the form "<oid> <dbname>".
+ *
+ * Returns the total number of database entries found in map.dat.
+ */
+static int
+get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimpleOidStringList *dbname_oid_list)
+{
+	FILE	   *pfile;
+	char		map_file_path[MAXPGPATH];
+	char		line[MAXPGPATH];
+	int			count = 0;
+
+	/*
+	 * If there is only global.dat file in dump, then return from here as
+	 * there is no database to restore.
+	 */
+	if (!file_exists_in_directory(dumpdirpath, "map.dat"))
+	{
+		pg_log_info("databases restoring is skipped as map.dat file is not present in \"%s\"", dumpdirpath);
+		return 0;
+	}
+
+	snprintf(map_file_path, MAXPGPATH, "%s/map.dat", dumpdirpath);
+
+	/* Open map.dat file. */
+	pfile = fopen(map_file_path, PG_BINARY_R);
+
+	if (pfile == NULL)
+		pg_fatal("could not open map.dat file: \"%s\"", map_file_path);
+
+	/* Append all the dbname/db_oid combinations to the list. */
+	while (fgets(line, MAXPGPATH, pfile) != NULL)
+	{
+		Oid			db_oid = InvalidOid;
+		char	   *dbname;
+		char	   *endp;
+		unsigned long oid_raw;
+		size_t		namelen;
+
+		/*
+		 * Parse the leading OID; a single space must separate it from the
+		 * database name.  (The old sscanf/strcpy parsing could read past the
+		 * string and underflow the name buffer on a malformed line.)
+		 */
+		oid_raw = strtoul(line, &endp, 10);
+		if (endp != line && *endp == ' ')
+		{
+			db_oid = (Oid) oid_raw;
+			if ((unsigned long) db_oid != oid_raw)
+				db_oid = InvalidOid;	/* value out of range for an OID */
+		}
+
+		dbname = (*endp == ' ') ? endp + 1 : endp;
+
+		/* Strip the trailing newline, if any (the last line may lack one). */
+		namelen = strlen(dbname);
+		if (namelen > 0 && dbname[namelen - 1] == '\n')
+			dbname[--namelen] = '\0';
+
+		/* Report error and exit if the file has any corrupted data. */
+		if (!OidIsValid(db_oid) || namelen == 0)
+			pg_fatal("invalid entry in map.dat file at line : %d", count + 1);
+
+		pg_log_info("found database \"%s\" (OID: %u) in map.dat file while restoring.", dbname, db_oid);
+
+		simple_oid_string_list_append(dbname_oid_list, db_oid, dbname);
+		count++;
+	}
+
+	/* Close map.dat file. */
+	fclose(pfile);
+
+	return count;
+}
+
+/*
+ * restore_all_databases
+ *
+ * Restore every database whose dump is present in the archive directory,
+ * driven by the OID-to-name mapping in map.dat.  Databases matching an
+ * --exclude-database pattern are skipped.
+ *
+ * Returns the number of errors ignored while restoring.
+ */
+static int
+restore_all_databases(PGconn *conn, const char *dumpdirpath,
+					  SimpleStringList db_exclude_patterns, RestoreOptions *opts,
+					  int numWorkers)
+{
+	SimpleOidStringList dbname_oid_list = {NULL, NULL};
+	int			num_db_restore = 0;
+	int			num_total_db;
+	int			n_errors_total;
+	int			count = 0;
+	char	   *connected_db = NULL;
+	bool		dumpData = opts->dumpData;
+	bool		dumpSchema = opts->dumpSchema;
+	bool		dumpStatistics = opts->dumpStatistics;	/* was opts->dumpSchema:
+														 * copy-paste bug */
+
+	/* Save db name to reuse it for all the databases. */
+	if (opts->cparams.dbname)
+		connected_db = opts->cparams.dbname;
+
+	num_total_db = get_dbname_oid_list_from_mfile(dumpdirpath, &dbname_oid_list);
+
+	/*
+	 * If map.dat has no entry, return from here after processing global.dat
+	 * file.
+	 */
+	if (dbname_oid_list.head == NULL)
+		return process_global_sql_commands(conn, dumpdirpath, opts->filename);
+
+	pg_log_info("found total %d database names in map.dat file", num_total_db);
+
+	/*
+	 * With no connection yet (i.e. -f was used instead of -d), connect to
+	 * "postgres", or failing that "template1", to evaluate exclude patterns
+	 * and run the global commands.
+	 */
+	if (!conn)
+	{
+		pg_log_info("trying to connect database \"postgres\" to dump into out file");
+
+		conn = ConnectDatabase("postgres", NULL, opts->cparams.pghost,
+							   opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT,
+							   false, progname, NULL, NULL, NULL, NULL);
+
+		/* Try with template1. */
+		if (!conn)
+		{
+			pg_log_info("trying to connect database \"template1\" as failed to connect to database \"postgres\" to dump into out file");
+
+			conn = ConnectDatabase("template1", NULL, opts->cparams.pghost,
+								   opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT,
+								   false, progname, NULL, NULL, NULL, NULL);
+		}
+	}
+
+	/*
+	 * processing pg_restore --exclude-database=PATTERN/NAME if no connection.
+	 */
+	num_db_restore = get_dbnames_list_to_restore(conn, &dbname_oid_list,
+												 db_exclude_patterns);
+
+	/* Open global.dat file and execute/append all the global sql commands. */
+	n_errors_total = process_global_sql_commands(conn, dumpdirpath, opts->filename);
+
+	/* Close the db connection as we are done with globals and patterns. */
+	if (conn)
+		PQfinish(conn);
+
+	/* Exit if no db needs to be restored. */
+	if (dbname_oid_list.head == NULL || num_db_restore == 0)
+	{
+		pg_log_info("no database needs to restore out of %d databases", num_total_db);
+		return n_errors_total;
+	}
+
+	pg_log_info("needs to restore %d databases out of %d databases", num_db_restore, num_total_db);
+
+	/*
+	 * Till now, we made a list of databases, those needs to be restored after
+	 * skipping names of exclude-database.  Now restore each of them in turn.
+	 */
+	for (SimpleOidStringListCell *db_cell = dbname_oid_list.head;
+		 db_cell; db_cell = db_cell->next)
+	{
+		char		subdirpath[MAXPGPATH];
+		char		subdirdbpath[MAXPGPATH];
+		char		dbfilename[MAXPGPATH];
+		int			n_errors;
+
+		/* ignore dbs marked for skipping */
+		if (!OidIsValid(db_cell->oid))
+			continue;
+
+		/*
+		 * We need to reset override_dbname so that objects can be restored
+		 * into already created database. (used with -d/--dbname option)
+		 */
+		if (opts->cparams.override_dbname)
+		{
+			pfree(opts->cparams.override_dbname);
+			opts->cparams.override_dbname = NULL;
+		}
+
+		snprintf(subdirdbpath, MAXPGPATH, "%s/databases", dumpdirpath);
+
+		/*
+		 * Look for the database dump file/dir. If there is an {oid}.tar or
+		 * {oid}.dmp file, use it. Otherwise try to use a directory called
+		 * {oid}
+		 */
+		snprintf(dbfilename, MAXPGPATH, "%u.tar", db_cell->oid);
+		if (file_exists_in_directory(subdirdbpath, dbfilename))
+			snprintf(subdirpath, MAXPGPATH, "%s/databases/%u.tar", dumpdirpath, db_cell->oid);
+		else
+		{
+			snprintf(dbfilename, MAXPGPATH, "%u.dmp", db_cell->oid);
+
+			if (file_exists_in_directory(subdirdbpath, dbfilename))
+				snprintf(subdirpath, MAXPGPATH, "%s/databases/%u.dmp", dumpdirpath, db_cell->oid);
+			else
+				snprintf(subdirpath, MAXPGPATH, "%s/databases/%u", dumpdirpath, db_cell->oid);
+		}
+
+		pg_log_info("restoring database \"%s\"", db_cell->str);
+
+		/* If database is already created, then don't set createDB flag. */
+		if (opts->cparams.dbname)
+		{
+			PGconn	   *test_conn;
+
+			test_conn = ConnectDatabase(db_cell->str, NULL, opts->cparams.pghost,
+										opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT,
+										false, progname, NULL, NULL, NULL, NULL);
+			if (test_conn)
+			{
+				PQfinish(test_conn);
+
+				/* Use already created database for connection. */
+				opts->createDB = 0;
+				opts->cparams.dbname = db_cell->str;
+			}
+			else
+			{
+				/* we'll have to create it */
+				opts->createDB = 1;
+				opts->cparams.dbname = connected_db;
+			}
+		}
+
+		/*
+		 * Reset flags - might have been reset in pg_backup_archiver.c by the
+		 * previous restore.
+		 */
+		opts->dumpData = dumpData;
+		opts->dumpSchema = dumpSchema;
+		opts->dumpStatistics = dumpStatistics;
+
+		/* Restore single database. */
+		n_errors = restore_one_database(subdirpath, opts, numWorkers, true, count);
+
+		/* Print a summary of ignored errors during single database restore. */
+		if (n_errors)
+		{
+			n_errors_total += n_errors;
+			pg_log_warning("errors ignored on database \"%s\" restore: %d", db_cell->str, n_errors);
+		}
+
+		count++;
+	}
+
+	/* Log number of processed databases. */
+	pg_log_info("number of restored databases are %d", num_db_restore);
+
+	/* Free dbname and dboid list. */
+	simple_oid_string_list_destroy(&dbname_oid_list);
+
+	return n_errors_total;
+}
+
+/*
+ * process_global_sql_commands
+ *
+ * Open global.dat in "dumpdirpath" and execute its SQL commands one
+ * statement at a time (semicolon is the statement terminator).  If "outfile"
+ * is given, all commands are copied to it instead of being executed, and no
+ * connection is needed.
+ *
+ * Returns the number of errors encountered while processing global.dat.
+ *
+ * NOTE(review): "conn" is dereferenced (PQuser/PQexec) whenever outfile is
+ * NULL -- callers must guarantee a live connection in that case; verify
+ * against the call sites in main() and restore_all_databases().
+ */
+static int
+process_global_sql_commands(PGconn *conn, const char *dumpdirpath, const char *outfile)
+{
+	char		global_file_path[MAXPGPATH];
+	PGresult   *result;
+	StringInfoData sqlstatement,
+				user_create;
+	FILE	   *pfile;
+	int			n_errors = 0;
+
+	snprintf(global_file_path, MAXPGPATH, "%s/global.dat", dumpdirpath);
+
+	/* Open global.dat file. */
+	pfile = fopen(global_file_path, PG_BINARY_R);
+
+	if (pfile == NULL)
+		pg_fatal("could not open global.dat file: \"%s\"", global_file_path);
+
+	/*
+	 * If outfile is given, then just copy all global.dat file data into
+	 * outfile.  (copy_or_print_global_file closes pfile for us.)
+	 */
+	if (outfile)
+	{
+		copy_or_print_global_file(outfile, pfile);
+		return 0;
+	}
+
+	/* Init sqlstatement to append commands. */
+	initStringInfo(&sqlstatement);
+
+	/*
+	 * Build the creation statement for our current role, so we can skip it
+	 * below: re-creating the role we are connected as would fail.
+	 */
+	initStringInfo(&user_create);
+	appendStringInfoString(&user_create, "CREATE ROLE ");
+	/* should use fmtId here, but we don't know the encoding */
+	appendStringInfoString(&user_create, PQuser(conn));
+	appendStringInfoString(&user_create, ";");
+
+	/* Process file till EOF and execute sql statements. */
+	while (read_one_statement(&sqlstatement, pfile) != EOF)
+	{
+		/*
+		 * don't try to create the role we are connected as
+		 *
+		 * NOTE(review): strstr() is a substring match, so any statement that
+		 * merely contains this text is also skipped -- confirm this cannot
+		 * drop unrelated commands.
+		 */
+		if (strstr(sqlstatement.data, user_create.data))
+			continue;
+
+		pg_log_info("executing query: %s", sqlstatement.data);
+		result = PQexec(conn, sqlstatement.data);
+
+		/* Count failed statements, but keep going (errors are "ignored"). */
+		switch (PQresultStatus(result))
+		{
+			case PGRES_COMMAND_OK:
+			case PGRES_TUPLES_OK:
+			case PGRES_EMPTY_QUERY:
+				break;
+			default:
+				n_errors++;
+				pg_log_error("could not execute query: \"%s\" \nCommand was: \"%s\"", PQerrorMessage(conn), sqlstatement.data);
+		}
+		PQclear(result);
+	}
+
+	/* Print a summary of ignored errors during global.dat. */
+	if (n_errors)
+		pg_log_warning("errors ignored on global.dat file restore: %d", n_errors);
+
+	fclose(pfile);
+
+	return n_errors;
+}
+
+/*
+ * copy_or_print_global_file
+ *
+ * Copy the already-opened global.dat stream "pfile" into the output file
+ * "outfile"; if "-" is given as outfile, print the commands to stdout
+ * instead.  Closes pfile in all cases.
+ *
+ * A write error on the output file is fatal: the previous version ignored
+ * fputc/fclose failures, silently producing a truncated dump.
+ */
+static void
+copy_or_print_global_file(const char *outfile, FILE *pfile)
+{
+	FILE	   *OPF;
+	int			c;
+
+	/* "-" is used for stdout. */
+	if (strcmp(outfile, "-") == 0)
+		OPF = stdout;
+	else
+	{
+		/* open outfile directly; the old snprintf copy served no purpose */
+		OPF = fopen(outfile, PG_BINARY_W);
+
+		if (OPF == NULL)
+		{
+			fclose(pfile);
+			pg_fatal("could not open file: \"%s\"", outfile);
+		}
+	}
+
+	/* Append global.dat into output file or print to stdout. */
+	while ((c = fgetc(pfile)) != EOF)
+		fputc(c, OPF);
+
+	fclose(pfile);
+
+	/* Close the output file; fclose flushes, so check it for write errors. */
+	if (OPF != stdout)
+	{
+		if (ferror(OPF) || fclose(OPF) != 0)
+			pg_fatal("could not write to file: \"%s\"", outfile);
+	}
+	else if (ferror(OPF))
+		pg_fatal("could not write to stdout");
+}
+
+/*
+ * quote_literal_internal
+ *
+ * Write a single-quoted SQL literal for the first "len" bytes of "src" into
+ * "dst", doubling each character that SQL_STR_DOUBLE flags.  If the input
+ * contains a backslash, an ESCAPE_STRING_SYNTAX prefix is emitted before the
+ * opening quote.
+ *
+ * Returns the number of bytes written.  The output is not NUL-terminated
+ * here; the caller must supply a buffer of at least len * 2 + 3 bytes (see
+ * quote_literal_cstr).
+ */
+static size_t
+quote_literal_internal(char *dst, const char *src, size_t len)
+{
+	const char *s;
+	char	   *savedst = dst;
+
+	/* Scan for a backslash; if present, switch to escape-string syntax. */
+	for (s = src; s < src + len; s++)
+	{
+		if (*s == '\\')
+		{
+			*dst++ = ESCAPE_STRING_SYNTAX;
+			break;
+		}
+	}
+
+	*dst++ = '\'';
+	while (len-- > 0)
+	{
+		/* Emit the byte twice when SQL_STR_DOUBLE says it needs doubling. */
+		if (SQL_STR_DOUBLE(*src, true))
+			*dst++ = *src;
+		*dst++ = *src++;
+	}
+	*dst++ = '\'';
+
+	return dst - savedst;
+}
+
+/*
+ * quote_literal_cstr
+ *
+ * Return a newly pg_malloc'd, NUL-terminated copy of rawstr quoted as a SQL
+ * string literal.
+ * copied from src/backend/utils/adt/quote.c
+ */
+static char *
+quote_literal_cstr(const char *rawstr)
+{
+	size_t		rawlen = strlen(rawstr);
+	char	   *quoted;
+	size_t		quotedlen;
+
+	/* Worst case: every byte doubled, plus E, two quotes, and a NUL. */
+	quoted = pg_malloc(rawlen * 2 + 3 + 1);
+
+	quotedlen = quote_literal_internal(quoted, rawstr, rawlen);
+	quoted[quotedlen] = '\0';
+
+	return quoted;
+}
diff --git a/src/bin/pg_dump/t/001_basic.pl b/src/bin/pg_dump/t/001_basic.pl
index 37d893d5e6a..0bbcdbe84a7 100644
--- a/src/bin/pg_dump/t/001_basic.pl
+++ b/src/bin/pg_dump/t/001_basic.pl
@@ -237,6 +237,11 @@ command_fails_like(
'pg_restore: options -C\/--create and -1\/--single-transaction cannot be used together'
);
+command_fails_like(
+ [ 'pg_restore', '--exclude-database=foo', '--globals-only', '-d', 'xxx' ],
+ qr/\Qpg_restore: error: option --exclude-database cannot be used together with -g\/--globals-only\E/,
+ 'pg_restore: option --exclude-database cannot be used together with -g/--globals-only');
+
# also fails for -r and -t, but it seems pointless to add more tests for those.
command_fails_like(
[ 'pg_dumpall', '--exclude-database=foo', '--globals-only' ],
@@ -244,4 +249,8 @@ command_fails_like(
'pg_dumpall: option --exclude-database cannot be used together with -g/--globals-only'
);
+command_fails_like(
+ [ 'pg_dumpall', '--format', 'x' ],
+ qr/\Qpg_dumpall: error: unrecognized archive format "x";\E/,
+ 'pg_dumpall: unrecognized archive format');
done_testing();