Diffstat (limited to 'src/bin')
-rw-r--r--  src/bin/initdb/t/001_initdb.pl | 3
-rw-r--r--  src/bin/pg_amcheck/t/004_verify_heapam.pl | 4
-rw-r--r--  src/bin/pg_basebackup/meson.build | 6
-rw-r--r--  src/bin/pg_basebackup/pg_createsubscriber.c | 42
-rw-r--r--  src/bin/pg_basebackup/pg_recvlogical.c | 20
-rw-r--r--  src/bin/pg_basebackup/t/030_pg_recvlogical.pl | 7
-rw-r--r--  src/bin/pg_basebackup/t/040_pg_createsubscriber.pl | 10
-rw-r--r--  src/bin/pg_combinebackup/t/010_hardlink.pl | 92
-rw-r--r--  src/bin/pg_dump/meson.build | 6
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.c | 4
-rw-r--r--  src/bin/pg_dump/pg_backup_directory.c | 11
-rw-r--r--  src/bin/pg_dump/pg_dump.c | 142
-rw-r--r--  src/bin/pg_dump/pg_dump.h | 1
-rw-r--r--  src/bin/pg_dump/pg_dumpall.c | 10
-rw-r--r--  src/bin/pg_dump/pg_restore.c | 49
-rw-r--r--  src/bin/pg_dump/t/001_basic.pl | 13
-rw-r--r--  src/bin/pg_dump/t/002_pg_dump.pl | 100
-rw-r--r--  src/bin/pg_dump/t/006_pg_dumpall.pl | 53
-rw-r--r--  src/bin/pg_rewind/t/RewindTest.pm | 2
-rw-r--r--  src/bin/pg_upgrade/check.c | 5
-rw-r--r--  src/bin/pg_upgrade/dump.c | 2
-rw-r--r--  src/bin/pg_upgrade/relfilenumber.c | 12
-rw-r--r--  src/bin/pg_upgrade/t/004_subscription.pl | 6
-rw-r--r--  src/bin/pg_upgrade/t/005_char_signedness.pl | 2
-rw-r--r--  src/bin/pg_upgrade/t/006_transfer_modes.pl | 28
-rw-r--r--  src/bin/pg_upgrade/task.c | 5
-rw-r--r--  src/bin/pg_verifybackup/meson.build | 8
-rw-r--r--  src/bin/pg_verifybackup/t/008_untar.pl | 22
-rw-r--r--  src/bin/pg_verifybackup/t/010_client_untar.pl | 22
-rw-r--r--  src/bin/pgbench/t/002_pgbench_no_server.pl | 18
-rw-r--r--  src/bin/psql/command.c | 39
-rw-r--r--  src/bin/psql/common.c | 29
-rw-r--r--  src/bin/psql/describe.c | 9
-rw-r--r--  src/bin/psql/help.c | 14
-rw-r--r--  src/bin/psql/t/001_basic.pl | 38
-rw-r--r--  src/bin/psql/tab-complete.in.c | 36
-rw-r--r--  src/bin/psql/variables.c | 10
-rw-r--r--  src/bin/scripts/t/100_vacuumdb.pl | 79
38 files changed, 653 insertions(+), 306 deletions(-)
diff --git a/src/bin/initdb/t/001_initdb.pl b/src/bin/initdb/t/001_initdb.pl
index 15dd10ce40a..b7ef7ed8d06 100644
--- a/src/bin/initdb/t/001_initdb.pl
+++ b/src/bin/initdb/t/001_initdb.pl
@@ -76,7 +76,8 @@ command_like(
'checksums are enabled in control file');
command_ok([ 'initdb', '--sync-only', $datadir ], 'sync only');
-command_ok([ 'initdb', '--sync-only', '--no-sync-data-files', $datadir ], '--no-sync-data-files');
+command_ok([ 'initdb', '--sync-only', '--no-sync-data-files', $datadir ],
+ '--no-sync-data-files');
command_fails([ 'initdb', $datadir ], 'existing data directory');
if ($supports_syncfs)
diff --git a/src/bin/pg_amcheck/t/004_verify_heapam.pl b/src/bin/pg_amcheck/t/004_verify_heapam.pl
index 2a3af2666f5..72693660fb6 100644
--- a/src/bin/pg_amcheck/t/004_verify_heapam.pl
+++ b/src/bin/pg_amcheck/t/004_verify_heapam.pl
@@ -529,7 +529,7 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
$tup->{t_infomask2} |= HEAP_NATTS_MASK;
push @expected,
- qr/${$header}number of attributes 2047 exceeds maximum expected for table 3/;
+ qr/${$header}number of attributes 2047 exceeds maximum 3 expected for table/;
}
elsif ($offnum == 10)
{
@@ -552,7 +552,7 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
$tup->{t_hoff} = 32;
push @expected,
- qr/${$header}number of attributes 67 exceeds maximum expected for table 3/;
+ qr/${$header}number of attributes 67 exceeds maximum 3 expected for table/;
}
elsif ($offnum == 12)
{
diff --git a/src/bin/pg_basebackup/meson.build b/src/bin/pg_basebackup/meson.build
index 8a1c96b4f5c..3a7fc10eab0 100644
--- a/src/bin/pg_basebackup/meson.build
+++ b/src/bin/pg_basebackup/meson.build
@@ -93,9 +93,9 @@ tests += {
'sd': meson.current_source_dir(),
'bd': meson.current_build_dir(),
'tap': {
- 'env': {'GZIP_PROGRAM': gzip.found() ? gzip.path() : '',
- 'TAR': tar.found() ? tar.path() : '',
- 'LZ4': program_lz4.found() ? program_lz4.path() : '',
+ 'env': {'GZIP_PROGRAM': gzip.found() ? gzip.full_path() : '',
+ 'TAR': tar.found() ? tar.full_path() : '',
+ 'LZ4': program_lz4.found() ? program_lz4.full_path() : '',
},
'tests': [
't/010_pg_basebackup.pl',
diff --git a/src/bin/pg_basebackup/pg_createsubscriber.c b/src/bin/pg_basebackup/pg_createsubscriber.c
index f65acc7cb11..11f71c03801 100644
--- a/src/bin/pg_basebackup/pg_createsubscriber.c
+++ b/src/bin/pg_basebackup/pg_createsubscriber.c
@@ -46,7 +46,7 @@ struct CreateSubscriberOptions
SimpleStringList replslot_names; /* list of replication slot names */
int recovery_timeout; /* stop recovery after this time */
bool all_dbs; /* all option */
- SimpleStringList objecttypes_to_remove; /* list of object types to remove */
+ SimpleStringList objecttypes_to_clean; /* list of object types to cleanup */
};
/* per-database publication/subscription info */
@@ -71,8 +71,8 @@ struct LogicalRepInfos
{
struct LogicalRepInfo *dbinfo;
bool two_phase; /* enable-two-phase option */
- bits32 objecttypes_to_remove; /* flags indicating which object types
- * to remove on subscriber */
+ bits32 objecttypes_to_clean; /* flags indicating which object types
+ * to clean up on subscriber */
};
static void cleanup_objects_atexit(void);
@@ -247,19 +247,19 @@ usage(void)
printf(_(" %s [OPTION]...\n"), progname);
printf(_("\nOptions:\n"));
printf(_(" -a, --all create subscriptions for all databases except template\n"
- " databases or databases that don't allow connections\n"));
+ " databases and databases that don't allow connections\n"));
printf(_(" -d, --database=DBNAME database in which to create a subscription\n"));
printf(_(" -D, --pgdata=DATADIR location for the subscriber data directory\n"));
printf(_(" -n, --dry-run dry run, just show what would be done\n"));
printf(_(" -p, --subscriber-port=PORT subscriber port number (default %s)\n"), DEFAULT_SUB_PORT);
printf(_(" -P, --publisher-server=CONNSTR publisher connection string\n"));
- printf(_(" -R, --remove=OBJECTTYPE remove all objects of the specified type from specified\n"
- " databases on the subscriber; accepts: publications\n"));
printf(_(" -s, --socketdir=DIR socket directory to use (default current dir.)\n"));
printf(_(" -t, --recovery-timeout=SECS seconds to wait for recovery to end\n"));
printf(_(" -T, --enable-two-phase enable two-phase commit for all subscriptions\n"));
printf(_(" -U, --subscriber-username=NAME user name for subscriber connection\n"));
printf(_(" -v, --verbose output verbose messages\n"));
+ printf(_(" --clean=OBJECTTYPE drop all objects of the specified type from specified\n"
+ " databases on the subscriber; accepts: \"%s\"\n"), "publications");
printf(_(" --config-file=FILENAME use specified main server configuration\n"
" file when running target cluster\n"));
printf(_(" --publication=NAME publication name\n"));
@@ -973,7 +973,7 @@ check_publisher(const struct LogicalRepInfo *dbinfo)
pg_log_warning("two_phase option will not be enabled for replication slots");
pg_log_warning_detail("Subscriptions will be created with the two_phase option disabled. "
"Prepared transactions will be replicated at COMMIT PREPARED.");
- pg_log_warning_hint("You can use --enable-two-phase switch to enable two_phase.");
+ pg_log_warning_hint("You can use the command-line option --enable-two-phase to enable two_phase.");
}
/*
@@ -1730,7 +1730,7 @@ static void
check_and_drop_publications(PGconn *conn, struct LogicalRepInfo *dbinfo)
{
PGresult *res;
- bool drop_all_pubs = dbinfos.objecttypes_to_remove & OBJECTTYPE_PUBLICATIONS;
+ bool drop_all_pubs = dbinfos.objecttypes_to_clean & OBJECTTYPE_PUBLICATIONS;
Assert(conn != NULL);
@@ -2026,7 +2026,6 @@ main(int argc, char **argv)
{"dry-run", no_argument, NULL, 'n'},
{"subscriber-port", required_argument, NULL, 'p'},
{"publisher-server", required_argument, NULL, 'P'},
- {"remove", required_argument, NULL, 'R'},
{"socketdir", required_argument, NULL, 's'},
{"recovery-timeout", required_argument, NULL, 't'},
{"enable-two-phase", no_argument, NULL, 'T'},
@@ -2038,6 +2037,7 @@ main(int argc, char **argv)
{"publication", required_argument, NULL, 2},
{"replication-slot", required_argument, NULL, 3},
{"subscription", required_argument, NULL, 4},
+ {"clean", required_argument, NULL, 5},
{NULL, 0, NULL, 0}
};
@@ -2109,7 +2109,7 @@ main(int argc, char **argv)
get_restricted_token();
- while ((c = getopt_long(argc, argv, "ad:D:np:P:R:s:t:TU:v",
+ while ((c = getopt_long(argc, argv, "ad:D:np:P:s:t:TU:v",
long_options, &option_index)) != -1)
{
switch (c)
@@ -2139,12 +2139,6 @@ main(int argc, char **argv)
case 'P':
opt.pub_conninfo_str = pg_strdup(optarg);
break;
- case 'R':
- if (!simple_string_list_member(&opt.objecttypes_to_remove, optarg))
- simple_string_list_append(&opt.objecttypes_to_remove, optarg);
- else
- pg_fatal("object type \"%s\" is specified more than once for -R/--remove", optarg);
- break;
case 's':
opt.socket_dir = pg_strdup(optarg);
canonicalize_path(opt.socket_dir);
@@ -2191,6 +2185,12 @@ main(int argc, char **argv)
else
pg_fatal("subscription \"%s\" specified more than once for --subscription", optarg);
break;
+ case 5:
+ if (!simple_string_list_member(&opt.objecttypes_to_clean, optarg))
+ simple_string_list_append(&opt.objecttypes_to_clean, optarg);
+ else
+ pg_fatal("object type \"%s\" specified more than once for --clean", optarg);
+ break;
default:
/* getopt_long already emitted a complaint */
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
@@ -2214,7 +2214,7 @@ main(int argc, char **argv)
if (bad_switch)
{
- pg_log_error("%s cannot be used with -a/--all", bad_switch);
+ pg_log_error("options %s and -a/--all cannot be used together", bad_switch);
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
@@ -2334,14 +2334,14 @@ main(int argc, char **argv)
}
/* Verify the object types specified for removal from the subscriber */
- for (SimpleStringListCell *cell = opt.objecttypes_to_remove.head; cell; cell = cell->next)
+ for (SimpleStringListCell *cell = opt.objecttypes_to_clean.head; cell; cell = cell->next)
{
if (pg_strcasecmp(cell->val, "publications") == 0)
- dbinfos.objecttypes_to_remove |= OBJECTTYPE_PUBLICATIONS;
+ dbinfos.objecttypes_to_clean |= OBJECTTYPE_PUBLICATIONS;
else
{
- pg_log_error("invalid object type \"%s\" specified for -R/--remove", cell->val);
- pg_log_error_hint("The valid option is: \"publications\"");
+ pg_log_error("invalid object type \"%s\" specified for --clean", cell->val);
+ pg_log_error_hint("The valid value is: \"%s\"", "publications");
exit(1);
}
}
diff --git a/src/bin/pg_basebackup/pg_recvlogical.c b/src/bin/pg_basebackup/pg_recvlogical.c
index e6810efe5f0..fb7a6a1d05d 100644
--- a/src/bin/pg_basebackup/pg_recvlogical.c
+++ b/src/bin/pg_basebackup/pg_recvlogical.c
@@ -41,8 +41,8 @@ typedef enum
/* Global Options */
static char *outfile = NULL;
static int verbose = 0;
-static bool two_phase = false;
-static bool failover = false;
+static bool two_phase = false; /* enable-two-phase option */
+static bool failover = false; /* enable-failover option */
static int noloop = 0;
static int standby_message_timeout = 10 * 1000; /* 10 sec = default */
static int fsync_interval = 10 * 1000; /* 10 sec = default */
@@ -89,9 +89,9 @@ usage(void)
printf(_(" --drop-slot drop the replication slot (for the slot's name see --slot)\n"));
printf(_(" --start start streaming in a replication slot (for the slot's name see --slot)\n"));
printf(_("\nOptions:\n"));
+ printf(_(" --enable-failover enable replication slot synchronization to standby servers when\n"
+ " creating a replication slot\n"));
printf(_(" -E, --endpos=LSN exit after receiving the specified LSN\n"));
- printf(_(" --failover enable replication slot synchronization to standby servers when\n"
- " creating a slot\n"));
printf(_(" -f, --file=FILE receive log into this file, - for stdout\n"));
printf(_(" -F --fsync-interval=SECS\n"
" time between fsyncs to the output file (default: %d)\n"), (fsync_interval / 1000));
@@ -105,7 +105,8 @@ usage(void)
printf(_(" -s, --status-interval=SECS\n"
" time between status packets sent to server (default: %d)\n"), (standby_message_timeout / 1000));
printf(_(" -S, --slot=SLOTNAME name of the logical replication slot\n"));
- printf(_(" -t, --two-phase enable decoding of prepared transactions when creating a slot\n"));
+ printf(_(" -t, --enable-two-phase enable decoding of prepared transactions when creating a slot\n"));
+ printf(_(" --two-phase (same as --enable-two-phase, deprecated)\n"));
printf(_(" -v, --verbose output verbose messages\n"));
printf(_(" -V, --version output version information, then exit\n"));
printf(_(" -?, --help show this help, then exit\n"));
@@ -698,9 +699,10 @@ main(int argc, char **argv)
{"file", required_argument, NULL, 'f'},
{"fsync-interval", required_argument, NULL, 'F'},
{"no-loop", no_argument, NULL, 'n'},
- {"failover", no_argument, NULL, 5},
+ {"enable-failover", no_argument, NULL, 5},
+ {"enable-two-phase", no_argument, NULL, 't'},
+ {"two-phase", no_argument, NULL, 't'}, /* deprecated */
{"verbose", no_argument, NULL, 'v'},
- {"two-phase", no_argument, NULL, 't'},
{"version", no_argument, NULL, 'V'},
{"help", no_argument, NULL, '?'},
/* connection options */
@@ -928,14 +930,14 @@ main(int argc, char **argv)
{
if (two_phase)
{
- pg_log_error("--two-phase may only be specified with --create-slot");
+ pg_log_error("%s may only be specified with --create-slot", "--enable-two-phase");
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (failover)
{
- pg_log_error("--failover may only be specified with --create-slot");
+ pg_log_error("%s may only be specified with --create-slot", "--enable-failover");
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
diff --git a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
index c82e78847b3..1b7a6f6f43f 100644
--- a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
+++ b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
@@ -110,7 +110,7 @@ $node->command_fails(
'--dbname' => $node->connstr('postgres'),
'--start',
'--endpos' => $nextlsn,
- '--two-phase', '--no-loop',
+ '--enable-two-phase', '--no-loop',
'--file' => '-',
],
'incorrect usage');
@@ -142,12 +142,13 @@ $node->command_ok(
'--slot' => 'test',
'--dbname' => $node->connstr('postgres'),
'--create-slot',
- '--failover',
+ '--enable-failover',
],
'slot with failover created');
my $result = $node->safe_psql('postgres',
- "SELECT failover FROM pg_catalog.pg_replication_slots WHERE slot_name = 'test'");
+ "SELECT failover FROM pg_catalog.pg_replication_slots WHERE slot_name = 'test'"
+);
is($result, 't', "failover is enabled for the new slot");
done_testing();
diff --git a/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl b/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl
index 2d532fee567..229fef5b3b5 100644
--- a/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl
+++ b/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl
@@ -331,7 +331,7 @@ $node_p->safe_psql($db1,
$node_p->wait_for_replay_catchup($node_s);
# Create user-defined publications, wait for streaming replication to sync them
-# to the standby, then verify that '--remove'
+# to the standby, then verify that '--clean'
# removes them.
$node_p->safe_psql(
$db1, qq(
@@ -399,7 +399,7 @@ command_fails_like(
'--database' => $db1,
'--all',
],
- qr/--database cannot be used with -a\/--all/,
+ qr/options --database and -a\/--all cannot be used together/,
'fail if --database is used with --all');
# run pg_createsubscriber with '--publication' and '--all' and verify
@@ -416,7 +416,7 @@ command_fails_like(
'--all',
'--publication' => 'pub1',
],
- qr/--publication cannot be used with -a\/--all/,
+ qr/options --publication and -a\/--all cannot be used together/,
'fail if --publication is used with --all');
# run pg_createsubscriber with '--all' option
@@ -446,7 +446,7 @@ is(scalar(() = $stderr =~ /creating subscription/g),
# Run pg_createsubscriber on node S. --verbose is used twice
# to show more information.
# In passing, also test the --enable-two-phase option and
-# --remove option
+# --clean option
command_ok(
[
'pg_createsubscriber',
@@ -463,7 +463,7 @@ command_ok(
'--database' => $db1,
'--database' => $db2,
'--enable-two-phase',
- '--remove' => 'publications',
+ '--clean' => 'publications',
],
'run pg_createsubscriber on node S');
diff --git a/src/bin/pg_combinebackup/t/010_hardlink.pl b/src/bin/pg_combinebackup/t/010_hardlink.pl
index a0ee419090c..4f92d6676bd 100644
--- a/src/bin/pg_combinebackup/t/010_hardlink.pl
+++ b/src/bin/pg_combinebackup/t/010_hardlink.pl
@@ -56,7 +56,7 @@ $primary->command_ok(
'--pgdata' => $backup1path,
'--no-sync',
'--checkpoint' => 'fast',
- '--wal-method' => 'none'
+ '--wal-method' => 'none'
],
"full backup");
@@ -74,7 +74,7 @@ $primary->command_ok(
'--pgdata' => $backup2path,
'--no-sync',
'--checkpoint' => 'fast',
- '--wal-method' => 'none',
+ '--wal-method' => 'none',
'--incremental' => $backup1path . '/backup_manifest'
],
"incremental backup");
@@ -112,45 +112,45 @@ done_testing();
# of the given data file.
sub check_data_file
{
- my ($data_file, $last_segment_nlinks) = @_;
-
- my @data_file_segments = ($data_file);
-
- # Start checking for additional segments
- my $segment_number = 1;
-
- while (1)
- {
- my $next_segment = $data_file . '.' . $segment_number;
-
- # If the file exists and is a regular file, add it to the list
- if (-f $next_segment)
- {
- push @data_file_segments, $next_segment;
- $segment_number++;
- }
- # Stop the loop if the file doesn't exist
- else
- {
- last;
- }
- }
-
- # All segments of the given data file should contain 2 hard links, except
- # for the last one, which should match the given number of links.
- my $last_segment = pop @data_file_segments;
-
- for my $segment (@data_file_segments)
- {
- # Get the file's stat information of each segment
- my $nlink_count = get_hard_link_count($segment);
- ok($nlink_count == 2, "File '$segment' has 2 hard links");
- }
-
- # Get the file's stat information of the last segment
- my $nlink_count = get_hard_link_count($last_segment);
- ok($nlink_count == $last_segment_nlinks,
- "File '$last_segment' has $last_segment_nlinks hard link(s)");
+ my ($data_file, $last_segment_nlinks) = @_;
+
+ my @data_file_segments = ($data_file);
+
+ # Start checking for additional segments
+ my $segment_number = 1;
+
+ while (1)
+ {
+ my $next_segment = $data_file . '.' . $segment_number;
+
+ # If the file exists and is a regular file, add it to the list
+ if (-f $next_segment)
+ {
+ push @data_file_segments, $next_segment;
+ $segment_number++;
+ }
+ # Stop the loop if the file doesn't exist
+ else
+ {
+ last;
+ }
+ }
+
+ # All segments of the given data file should contain 2 hard links, except
+ # for the last one, which should match the given number of links.
+ my $last_segment = pop @data_file_segments;
+
+ for my $segment (@data_file_segments)
+ {
+ # Get the file's stat information of each segment
+ my $nlink_count = get_hard_link_count($segment);
+ ok($nlink_count == 2, "File '$segment' has 2 hard links");
+ }
+
+ # Get the file's stat information of the last segment
+ my $nlink_count = get_hard_link_count($last_segment);
+ ok($nlink_count == $last_segment_nlinks,
+ "File '$last_segment' has $last_segment_nlinks hard link(s)");
}
@@ -159,11 +159,11 @@ sub check_data_file
# that file.
sub get_hard_link_count
{
- my ($file) = @_;
+ my ($file) = @_;
- # Get file stats
- my @stats = stat($file);
- my $nlink = $stats[3]; # Number of hard links
+ # Get file stats
+ my @stats = stat($file);
+ my $nlink = $stats[3]; # Number of hard links
- return $nlink;
+ return $nlink;
}
diff --git a/src/bin/pg_dump/meson.build b/src/bin/pg_dump/meson.build
index d8e9e101254..4a4ebbd8ec9 100644
--- a/src/bin/pg_dump/meson.build
+++ b/src/bin/pg_dump/meson.build
@@ -91,9 +91,9 @@ tests += {
'bd': meson.current_build_dir(),
'tap': {
'env': {
- 'GZIP_PROGRAM': gzip.found() ? gzip.path() : '',
- 'LZ4': program_lz4.found() ? program_lz4.path() : '',
- 'ZSTD': program_zstd.found() ? program_zstd.path() : '',
+ 'GZIP_PROGRAM': gzip.found() ? gzip.full_path() : '',
+ 'LZ4': program_lz4.found() ? program_lz4.full_path() : '',
+ 'ZSTD': program_zstd.found() ? program_zstd.full_path() : '',
'with_icu': icu.found() ? 'yes' : 'no',
},
'tests': [
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index afa42337b11..197c1295d93 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -152,7 +152,7 @@ InitDumpOptions(DumpOptions *opts)
opts->dumpSections = DUMP_UNSECTIONED;
opts->dumpSchema = true;
opts->dumpData = true;
- opts->dumpStatistics = true;
+ opts->dumpStatistics = false;
}
/*
@@ -2655,7 +2655,7 @@ WriteToc(ArchiveHandle *AH)
pg_fatal("unexpected TOC entry in WriteToc(): %d %s %s",
te->dumpId, te->desc, te->tag);
- if (fseeko(AH->FH, te->defnLen, SEEK_CUR != 0))
+ if (fseeko(AH->FH, te->defnLen, SEEK_CUR) != 0)
pg_fatal("error during file seek: %m");
}
else if (te->defnDumper)
diff --git a/src/bin/pg_dump/pg_backup_directory.c b/src/bin/pg_dump/pg_backup_directory.c
index 21b00792a8a..bc2a2fb4797 100644
--- a/src/bin/pg_dump/pg_backup_directory.c
+++ b/src/bin/pg_dump/pg_backup_directory.c
@@ -412,10 +412,15 @@ _LoadLOs(ArchiveHandle *AH, TocEntry *te)
/*
* Note: before archive v16, there was always only one BLOBS TOC entry,
- * now there can be multiple. We don't need to worry what version we are
- * reading though, because tctx->filename should be correct either way.
+ * now there can be multiple. Furthermore, although the actual filename
+ * was always "blobs.toc" before v16, the value of tctx->filename did not
+ * match that before commit 548e50976 fixed it. For simplicity we assume
+ * it must be "blobs.toc" in all archives before v16.
*/
- setFilePath(AH, tocfname, tctx->filename);
+ if (AH->version < K_VERS_1_16)
+ setFilePath(AH, tocfname, "blobs.toc");
+ else
+ setFilePath(AH, tocfname, tctx->filename);
CFH = ctx->LOsTocFH = InitDiscoverCompressFileHandle(tocfname, PG_BINARY_R);
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index e2e7975b34e..1937997ea67 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -350,7 +350,9 @@ static void buildMatViewRefreshDependencies(Archive *fout);
static void getTableDataFKConstraints(void);
static void determineNotNullFlags(Archive *fout, PGresult *res, int r,
TableInfo *tbinfo, int j,
- int i_notnull_name, int i_notnull_invalidoid,
+ int i_notnull_name,
+ int i_notnull_comment,
+ int i_notnull_invalidoid,
int i_notnull_noinherit,
int i_notnull_islocal,
PQExpBuffer *invalidnotnulloids);
@@ -1235,7 +1237,7 @@ main(int argc, char **argv)
static void
help(const char *progname)
{
- printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
+ printf(_("%s exports a PostgreSQL database as an SQL script or to other formats.\n\n"), progname);
printf(_("Usage:\n"));
printf(_(" %s [OPTION]... [DBNAME]\n"), progname);
@@ -6890,7 +6892,8 @@ getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages,
(relkind == RELKIND_PARTITIONED_TABLE) ||
(relkind == RELKIND_INDEX) ||
(relkind == RELKIND_PARTITIONED_INDEX) ||
- (relkind == RELKIND_MATVIEW))
+ (relkind == RELKIND_MATVIEW ||
+ relkind == RELKIND_FOREIGN_TABLE))
{
RelStatsInfo *info = pg_malloc0(sizeof(RelStatsInfo));
DumpableObject *dobj = &info->dobj;
@@ -6929,6 +6932,7 @@ getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages,
case RELKIND_RELATION:
case RELKIND_PARTITIONED_TABLE:
case RELKIND_MATVIEW:
+ case RELKIND_FOREIGN_TABLE:
info->section = SECTION_DATA;
break;
case RELKIND_INDEX:
@@ -6936,7 +6940,7 @@ getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages,
info->section = SECTION_POST_DATA;
break;
default:
- pg_fatal("cannot dump statistics for relation kind '%c'",
+ pg_fatal("cannot dump statistics for relation kind \"%c\"",
info->relkind);
}
@@ -9004,6 +9008,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
int i_attalign;
int i_attislocal;
int i_notnull_name;
+ int i_notnull_comment;
int i_notnull_noinherit;
int i_notnull_islocal;
int i_notnull_invalidoid;
@@ -9087,7 +9092,8 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
/*
* Find out any NOT NULL markings for each column. In 18 and up we read
- * pg_constraint to obtain the constraint name. notnull_noinherit is set
+ * pg_constraint to obtain the constraint name, and for valid constraints
+ * also pg_description to obtain its comment. notnull_noinherit is set
* according to the NO INHERIT property. For versions prior to 18, we
* store an empty string as the name when a constraint is marked as
* attnotnull (this cues dumpTableSchema to print the NOT NULL clause
@@ -9095,7 +9101,8 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
*
* For invalid constraints, we need to store their OIDs for processing
* elsewhere, so we bring the pg_constraint.oid value when the constraint
- * is invalid, and NULL otherwise.
+ * is invalid, and NULL otherwise. Their comments are handled not here
+ * but by collectComments, because they're their own dumpable object.
*
* We track in notnull_islocal whether the constraint was defined directly
* in this table or via an ancestor, for binary upgrade. flagInhAttrs
@@ -9105,6 +9112,8 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
if (fout->remoteVersion >= 180000)
appendPQExpBufferStr(q,
"co.conname AS notnull_name,\n"
+ "CASE WHEN co.convalidated THEN pt.description"
+ " ELSE NULL END AS notnull_comment,\n"
"CASE WHEN NOT co.convalidated THEN co.oid "
"ELSE NULL END AS notnull_invalidoid,\n"
"co.connoinherit AS notnull_noinherit,\n"
@@ -9112,6 +9121,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
else
appendPQExpBufferStr(q,
"CASE WHEN a.attnotnull THEN '' ELSE NULL END AS notnull_name,\n"
+ "NULL AS notnull_comment,\n"
"NULL AS notnull_invalidoid,\n"
"false AS notnull_noinherit,\n"
"a.attislocal AS notnull_islocal,\n");
@@ -9155,15 +9165,16 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
/*
* In versions 18 and up, we need pg_constraint for explicit NOT NULL
- * entries. Also, we need to know if the NOT NULL for each column is
- * backing a primary key.
+ * entries and pg_description to get their comments.
*/
if (fout->remoteVersion >= 180000)
appendPQExpBufferStr(q,
" LEFT JOIN pg_catalog.pg_constraint co ON "
"(a.attrelid = co.conrelid\n"
" AND co.contype = 'n' AND "
- "co.conkey = array[a.attnum])\n");
+ "co.conkey = array[a.attnum])\n"
+ " LEFT JOIN pg_catalog.pg_description pt ON "
+ "(pt.classoid = co.tableoid AND pt.objoid = co.oid)\n");
appendPQExpBufferStr(q,
"WHERE a.attnum > 0::pg_catalog.int2\n"
@@ -9187,6 +9198,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
i_attalign = PQfnumber(res, "attalign");
i_attislocal = PQfnumber(res, "attislocal");
i_notnull_name = PQfnumber(res, "notnull_name");
+ i_notnull_comment = PQfnumber(res, "notnull_comment");
i_notnull_invalidoid = PQfnumber(res, "notnull_invalidoid");
i_notnull_noinherit = PQfnumber(res, "notnull_noinherit");
i_notnull_islocal = PQfnumber(res, "notnull_islocal");
@@ -9255,6 +9267,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
tbinfo->attfdwoptions = (char **) pg_malloc(numatts * sizeof(char *));
tbinfo->attmissingval = (char **) pg_malloc(numatts * sizeof(char *));
tbinfo->notnull_constrs = (char **) pg_malloc(numatts * sizeof(char *));
+ tbinfo->notnull_comment = (char **) pg_malloc(numatts * sizeof(char *));
tbinfo->notnull_invalid = (bool *) pg_malloc(numatts * sizeof(bool));
tbinfo->notnull_noinh = (bool *) pg_malloc(numatts * sizeof(bool));
tbinfo->notnull_islocal = (bool *) pg_malloc(numatts * sizeof(bool));
@@ -9286,11 +9299,14 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
determineNotNullFlags(fout, res, r,
tbinfo, j,
i_notnull_name,
+ i_notnull_comment,
i_notnull_invalidoid,
i_notnull_noinherit,
i_notnull_islocal,
&invalidnotnulloids);
+ tbinfo->notnull_comment[j] = PQgetisnull(res, r, i_notnull_comment) ?
+ NULL : pg_strdup(PQgetvalue(res, r, i_notnull_comment));
tbinfo->attoptions[j] = pg_strdup(PQgetvalue(res, r, i_attoptions));
tbinfo->attcollation[j] = atooid(PQgetvalue(res, r, i_attcollation));
tbinfo->attcompression[j] = *(PQgetvalue(res, r, i_attcompression));
@@ -9461,7 +9477,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
int i_consrc;
int i_conislocal;
- pg_log_info("finding invalid not null constraints");
+ pg_log_info("finding invalid not-null constraints");
resetPQExpBuffer(q);
appendPQExpBuffer(q,
@@ -9702,8 +9718,9 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
* 4) The column has a constraint with a known name; in that case
* notnull_constrs carries that name and dumpTableSchema will print
* "CONSTRAINT the_name NOT NULL". However, if the name is the default
- * (table_column_not_null), there's no need to print that name in the dump,
- * so notnull_constrs is set to the empty string and it behaves as case 2.
+ * (table_column_not_null) and there's no comment on the constraint,
+ * there's no need to print that name in the dump, so notnull_constrs
+ * is set to the empty string and it behaves as case 2.
*
* In a child table that inherits from a parent already containing NOT NULL
* constraints and the columns in the child don't have their own NOT NULL
@@ -9730,6 +9747,7 @@ static void
determineNotNullFlags(Archive *fout, PGresult *res, int r,
TableInfo *tbinfo, int j,
int i_notnull_name,
+ int i_notnull_comment,
int i_notnull_invalidoid,
int i_notnull_noinherit,
int i_notnull_islocal,
@@ -9803,11 +9821,13 @@ determineNotNullFlags(Archive *fout, PGresult *res, int r,
{
/*
* In binary upgrade of inheritance child tables, must have a
- * constraint name that we can UPDATE later.
+ * constraint name that we can UPDATE later; same if there's a
+ * comment on the constraint.
*/
- if (dopt->binary_upgrade &&
- !tbinfo->ispartition &&
- !tbinfo->notnull_islocal)
+ if ((dopt->binary_upgrade &&
+ !tbinfo->ispartition &&
+ !tbinfo->notnull_islocal) ||
+ !PQgetisnull(res, r, i_notnull_comment))
{
tbinfo->notnull_constrs[j] =
pstrdup(PQgetvalue(res, r, i_notnull_name));
@@ -10765,6 +10785,9 @@ fetchAttributeStats(Archive *fout)
restarted = true;
}
+ appendPQExpBufferChar(nspnames, '{');
+ appendPQExpBufferChar(relnames, '{');
+
/*
* Scan the TOC for the next set of relevant stats entries. We assume
* that statistics are dumped in the order they are listed in the TOC.
@@ -10776,23 +10799,25 @@ fetchAttributeStats(Archive *fout)
if ((te->reqs & REQ_STATS) != 0 &&
strcmp(te->desc, "STATISTICS DATA") == 0)
{
- appendPQExpBuffer(nspnames, "%s%s", count ? "," : "",
- fmtId(te->namespace));
- appendPQExpBuffer(relnames, "%s%s", count ? "," : "",
- fmtId(te->tag));
+ appendPGArray(nspnames, te->namespace);
+ appendPGArray(relnames, te->tag);
count++;
}
}
+ appendPQExpBufferChar(nspnames, '}');
+ appendPQExpBufferChar(relnames, '}');
+
/* Execute the query for the next batch of relations. */
if (count > 0)
{
PQExpBuffer query = createPQExpBuffer();
- appendPQExpBuffer(query, "EXECUTE getAttributeStats("
- "'{%s}'::pg_catalog.name[],"
- "'{%s}'::pg_catalog.name[])",
- nspnames->data, relnames->data);
+ appendPQExpBufferStr(query, "EXECUTE getAttributeStats(");
+ appendStringLiteralAH(query, nspnames->data, fout);
+ appendPQExpBufferStr(query, "::pg_catalog.name[],");
+ appendStringLiteralAH(query, relnames->data, fout);
+ appendPQExpBufferStr(query, "::pg_catalog.name[])");
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
destroyPQExpBuffer(query);
}
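
The hunk above stops splicing fmtId() output into a '{%s}' literal and instead builds a real array value with appendPGArray() and binds it via appendStringLiteralAH(), so relation names containing commas, spaces, or quotes can no longer break the EXECUTE statement. A minimal, self-contained sketch of the element-quoting rule involved (illustrative code only, not PostgreSQL source; append_array_element is a hypothetical stand-in for appendPGArray):

    #include <stdio.h>
    #include <string.h>

    /*
     * Append one element to a text-format array literal: wrap the value
     * in double quotes and backslash-escape embedded quotes/backslashes,
     * which is what the backend's array-input parser expects.
     */
    static void
    append_array_element(char *dst, const char *val, int first)
    {
        char *p = dst + strlen(dst);

        if (!first)
            *p++ = ',';
        *p++ = '"';
        for (; *val; val++)
        {
            if (*val == '"' || *val == '\\')
                *p++ = '\\';
            *p++ = *val;
        }
        *p++ = '"';
        *p = '\0';
    }

    int
    main(void)
    {
        char buf[256] = "{";

        append_array_element(buf, "dump_test", 1);
        append_array_element(buf, "\"dump_test\"'s post-data index", 0);
        strcat(buf, "}");

        /* prints: {"dump_test","\"dump_test\"'s post-data index"} */
        puts(buf);
        return 0;
    }

Passing the finished literal through appendStringLiteralAH() then escapes it once more as a single SQL string parameter, which is why the query text changes from '{%s}' interpolation to a bound literal.
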
@@ -10850,7 +10875,7 @@ dumpRelationStats_dumper(Archive *fout, const void *userArg, const TocEntry *te)
expected_te = expected_te->next;
if (te != expected_te)
- pg_fatal("stats dumped out of order (current: %d %s %s) (expected: %d %s %s)",
+ pg_fatal("statistics dumped out of order (current: %d %s %s, expected: %d %s %s)",
te->dumpId, te->desc, te->tag,
expected_te->dumpId, expected_te->desc, expected_te->tag);
@@ -10924,7 +10949,20 @@ dumpRelationStats_dumper(Archive *fout, const void *userArg, const TocEntry *te)
appendStringLiteralAH(out, rsinfo->dobj.name, fout);
appendPQExpBufferStr(out, ",\n");
appendPQExpBuffer(out, "\t'relpages', '%d'::integer,\n", rsinfo->relpages);
- appendPQExpBuffer(out, "\t'reltuples', '%s'::real,\n", rsinfo->reltuples);
+
+ /*
+ * Before v14, a reltuples value of 0 was ambiguous: it could either mean
+ * the relation is empty, or it could mean that it hadn't yet been
+ * vacuumed or analyzed. (Newer versions use -1 for the latter case.)
+ * This ambiguity allegedly can cause the planner to choose inefficient
+ * plans after restoring to v18 or newer. To deal with this, let's just
+ * set reltuples to -1 in that case.
+ */
+ if (fout->remoteVersion < 140000 && strcmp("0", rsinfo->reltuples) == 0)
+ appendPQExpBufferStr(out, "\t'reltuples', '-1'::real,\n");
+ else
+ appendPQExpBuffer(out, "\t'reltuples', '%s'::real,\n", rsinfo->reltuples);
+
appendPQExpBuffer(out, "\t'relallvisible', '%d'::integer",
rsinfo->relallvisible);
@@ -10978,7 +11016,7 @@ dumpRelationStats_dumper(Archive *fout, const void *userArg, const TocEntry *te)
appendStringLiteralAH(out, rsinfo->dobj.name, fout);
if (PQgetisnull(res, rownum, i_attname))
- pg_fatal("attname cannot be NULL");
+ pg_fatal("unexpected null attname");
attname = PQgetvalue(res, rownum, i_attname);
/*
@@ -17666,6 +17704,56 @@ dumpTableSchema(Archive *fout, const TableInfo *tbinfo)
if (tbinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
dumpTableSecLabel(fout, tbinfo, reltypename);
+ /*
+ * Dump comments for not-null constraints that aren't to be dumped
+ * separately (those are processed by collectComments/dumpComment).
+ */
+ if (!fout->dopt->no_comments && dopt->dumpSchema &&
+ fout->remoteVersion >= 180000)
+ {
+ PQExpBuffer comment = NULL;
+ PQExpBuffer tag = NULL;
+
+ for (j = 0; j < tbinfo->numatts; j++)
+ {
+ if (tbinfo->notnull_constrs[j] != NULL &&
+ tbinfo->notnull_comment[j] != NULL)
+ {
+ if (comment == NULL)
+ {
+ comment = createPQExpBuffer();
+ tag = createPQExpBuffer();
+ }
+ else
+ {
+ resetPQExpBuffer(comment);
+ resetPQExpBuffer(tag);
+ }
+
+ appendPQExpBuffer(comment, "COMMENT ON CONSTRAINT %s ON %s IS ",
+ fmtId(tbinfo->notnull_constrs[j]), qualrelname);
+ appendStringLiteralAH(comment, tbinfo->notnull_comment[j], fout);
+ appendPQExpBufferStr(comment, ";\n");
+
+ appendPQExpBuffer(tag, "CONSTRAINT %s ON %s",
+ fmtId(tbinfo->notnull_constrs[j]), qrelname);
+
+ ArchiveEntry(fout, nilCatalogId, createDumpId(),
+ ARCHIVE_OPTS(.tag = tag->data,
+ .namespace = tbinfo->dobj.namespace->dobj.name,
+ .owner = tbinfo->rolname,
+ .description = "COMMENT",
+ .section = SECTION_NONE,
+ .createStmt = comment->data,
+ .deps = &(tbinfo->dobj.dumpId),
+ .nDeps = 1));
+ }
+ }
+
+ destroyPQExpBuffer(comment);
+ destroyPQExpBuffer(tag);
+ }
+
/* Dump comments on inlined table constraints */
for (j = 0; j < tbinfo->ncheck; j++)
{
diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h
index 7417eab6aef..39eef1d6617 100644
--- a/src/bin/pg_dump/pg_dump.h
+++ b/src/bin/pg_dump/pg_dump.h
@@ -365,6 +365,7 @@ typedef struct _tableInfo
* there isn't one on this column. If
* empty string, unnamed constraint
* (pre-v17) */
+ char **notnull_comment; /* comment thereof */
bool *notnull_invalid; /* true for NOT NULL NOT VALID */
bool *notnull_noinh; /* NOT NULL is NO INHERIT */
bool *notnull_islocal; /* true if NOT NULL has local definition */
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 7f9c302b719..3cbcad65c5f 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -525,7 +525,7 @@ main(int argc, char *argv[])
OPF = fopen(global_path, PG_BINARY_W);
if (!OPF)
- pg_fatal("could not open \"%s\": %m", global_path);
+ pg_fatal("could not open file \"%s\": %m", global_path);
}
else if (filename)
{
@@ -699,7 +699,7 @@ main(int argc, char *argv[])
static void
help(void)
{
- printf(_("%s extracts a PostgreSQL database cluster based on specified dump format.\n\n"), progname);
+ printf(_("%s exports a PostgreSQL database cluster as an SQL script or to other formats.\n\n"), progname);
printf(_("Usage:\n"));
printf(_(" %s [OPTION]...\n"), progname);
@@ -1659,14 +1659,14 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat)
/* Create a subdirectory with 'databases' name under main directory. */
if (mkdir(db_subdir, pg_dir_create_mode) != 0)
- pg_fatal("could not create subdirectory \"%s\": %m", db_subdir);
+ pg_fatal("could not create directory \"%s\": %m", db_subdir);
snprintf(map_file_path, MAXPGPATH, "%s/map.dat", filename);
/* Create a map file (to store dboid and dbname) */
map_file = fopen(map_file_path, PG_BINARY_W);
if (!map_file)
- pg_fatal("could not open map file: %s", strerror(errno));
+ pg_fatal("could not open file \"%s\": %m", map_file_path);
}
for (i = 0; i < PQntuples(res); i++)
@@ -1976,7 +1976,7 @@ parseDumpFormat(const char *format)
else if (pg_strcasecmp(format, "tar") == 0)
archDumpFormat = archTar;
else
- pg_fatal("unrecognized archive format \"%s\"; please specify \"c\", \"d\", \"p\", or \"t\"",
+ pg_fatal("unrecognized output format \"%s\"; please specify \"c\", \"d\", \"p\", or \"t\"",
format);
return archDumpFormat;
diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c
index f2182e91825..6ef789cb06d 100644
--- a/src/bin/pg_dump/pg_restore.c
+++ b/src/bin/pg_dump/pg_restore.c
@@ -523,7 +523,7 @@ main(int argc, char **argv)
*/
if (!globals_only && opts->createDB != 1)
{
- pg_log_error("-C/--create option should be specified when restoring an archive created by pg_dumpall");
+ pg_log_error("option -C/--create must be specified when restoring an archive created by pg_dumpall");
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
pg_log_error_hint("Individual databases can be restored using their specific archives.");
exit_nicely(1);
@@ -557,7 +557,7 @@ main(int argc, char **argv)
if (conn)
PQfinish(conn);
- pg_log_info("database restoring skipped as -g/--globals-only option was specified");
+ pg_log_info("database restoring skipped because option -g/--globals-only was specified");
}
else
{
@@ -712,9 +712,9 @@ usage(const char *progname)
printf(_(" --use-set-session-authorization\n"
" use SET SESSION AUTHORIZATION commands instead of\n"
" ALTER OWNER commands to set ownership\n"));
- printf(_(" --with-data dump the data\n"));
- printf(_(" --with-schema dump the schema\n"));
- printf(_(" --with-statistics dump the statistics\n"));
+ printf(_(" --with-data restore the data\n"));
+ printf(_(" --with-schema restore the schema\n"));
+ printf(_(" --with-statistics restore the statistics\n"));
printf(_("\nConnection options:\n"));
printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
@@ -725,8 +725,8 @@ usage(const char *progname)
printf(_(" --role=ROLENAME do SET ROLE before restore\n"));
printf(_("\n"
- "The options -I, -n, -N, -P, -t, -T, --section, and --exclude-database can be combined\n"
- "and specified multiple times to select multiple objects.\n"));
+ "The options -I, -n, -N, -P, -t, -T, --section, and --exclude-database can be\n"
+ "combined and specified multiple times to select multiple objects.\n"));
printf(_("\nIf no input file name is supplied, then standard input is used.\n\n"));
printf(_("Report bugs to <%s>.\n"), PACKAGE_BUGREPORT);
printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_URL);
@@ -946,7 +946,7 @@ get_dbnames_list_to_restore(PGconn *conn,
query = createPQExpBuffer();
if (!conn)
- pg_log_info("considering PATTERN as NAME for --exclude-database option as no db connection while doing pg_restore.");
+ pg_log_info("considering PATTERN as NAME for --exclude-database option as no database connection while doing pg_restore");
/*
* Process one by one all dbnames and if specified to skip restoring, then
@@ -992,7 +992,7 @@ get_dbnames_list_to_restore(PGconn *conn,
if ((PQresultStatus(res) == PGRES_TUPLES_OK) && PQntuples(res))
{
skip_db_restore = true;
- pg_log_info("database \"%s\" matches exclude pattern: \"%s\"", dbidname->str, pat_cell->val);
+ pg_log_info("database name \"%s\" matches exclude pattern \"%s\"", dbidname->str, pat_cell->val);
}
PQclear(res);
@@ -1048,7 +1048,7 @@ get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oi
*/
if (!file_exists_in_directory(dumpdirpath, "map.dat"))
{
- pg_log_info("database restoring is skipped as \"map.dat\" is not present in \"%s\"", dumpdirpath);
+ pg_log_info("database restoring is skipped because file \"%s\" does not exist in directory \"%s\"", "map.dat", dumpdirpath);
return 0;
}
@@ -1058,7 +1058,7 @@ get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oi
pfile = fopen(map_file_path, PG_BINARY_R);
if (pfile == NULL)
- pg_fatal("could not open \"%s\": %m", map_file_path);
+ pg_fatal("could not open file \"%s\": %m", map_file_path);
initStringInfo(&linebuf);
@@ -1086,10 +1086,10 @@ get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oi
/* Report error and exit if the file has any corrupted data. */
if (!OidIsValid(db_oid) || namelen <= 1)
- pg_fatal("invalid entry in \"%s\" at line: %d", map_file_path,
+ pg_fatal("invalid entry in file \"%s\" on line %d", map_file_path,
count + 1);
- pg_log_info("found database \"%s\" (OID: %u) in \"%s\"",
+ pg_log_info("found database \"%s\" (OID: %u) in file \"%s\"",
dbname, db_oid, map_file_path);
dbidname = pg_malloc(offsetof(DbOidName, str) + namelen + 1);
@@ -1142,11 +1142,14 @@ restore_all_databases(PGconn *conn, const char *dumpdirpath,
if (dbname_oid_list.head == NULL)
return process_global_sql_commands(conn, dumpdirpath, opts->filename);
- pg_log_info("found %d database names in \"map.dat\"", num_total_db);
+ pg_log_info(ngettext("found %d database name in \"%s\"",
+ "found %d database names in \"%s\"",
+ num_total_db),
+ num_total_db, "map.dat");
if (!conn)
{
- pg_log_info("trying to connect database \"postgres\"");
+ pg_log_info("trying to connect to database \"%s\"", "postgres");
conn = ConnectDatabase("postgres", NULL, opts->cparams.pghost,
opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT,
@@ -1155,7 +1158,7 @@ restore_all_databases(PGconn *conn, const char *dumpdirpath,
/* Try with template1. */
if (!conn)
{
- pg_log_info("trying to connect database \"template1\"");
+ pg_log_info("trying to connect to database \"%s\"", "template1");
conn = ConnectDatabase("template1", NULL, opts->cparams.pghost,
opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT,
@@ -1179,7 +1182,9 @@ restore_all_databases(PGconn *conn, const char *dumpdirpath,
/* Exit if no db needs to be restored. */
if (dbname_oid_list.head == NULL || num_db_restore == 0)
{
- pg_log_info("no database needs to restore out of %d databases", num_total_db);
+ pg_log_info(ngettext("no database needs restoring out of %d database",
+ "no database needs restoring out of %d databases", num_total_db),
+ num_total_db);
return n_errors_total;
}
@@ -1314,7 +1319,7 @@ process_global_sql_commands(PGconn *conn, const char *dumpdirpath, const char *o
pfile = fopen(global_file_path, PG_BINARY_R);
if (pfile == NULL)
- pg_fatal("could not open \"%s\": %m", global_file_path);
+ pg_fatal("could not open file \"%s\": %m", global_file_path);
/*
* If outfile is given, then just copy all global.dat file data into
@@ -1354,15 +1359,17 @@ process_global_sql_commands(PGconn *conn, const char *dumpdirpath, const char *o
break;
default:
n_errors++;
- pg_log_error("could not execute query: \"%s\" \nCommand was: \"%s\"", PQerrorMessage(conn), sqlstatement.data);
+ pg_log_error("could not execute query: %s", PQerrorMessage(conn));
+ pg_log_error_detail("Command was: %s", sqlstatement.data);
}
PQclear(result);
}
/* Print a summary of ignored errors during global.dat. */
if (n_errors)
- pg_log_warning("ignored %d errors in \"%s\"", n_errors, global_file_path);
-
+ pg_log_warning(ngettext("ignored %d error in file \"%s\"",
+ "ignored %d errors in file \"%s\"", n_errors),
+ n_errors, global_file_path);
fclose(pfile);
return n_errors;
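
Several message changes above move plural-sensitive strings to ngettext(), passing the count twice: once to select the singular or plural msgid and once as the printf-style argument. A minimal sketch of that pattern (illustrative only; the ngettext macro below is a stand-in for the libintl function when NLS is disabled):

    #include <stdio.h>

    /* stand-in for libintl's ngettext() when NLS is disabled */
    #define ngettext(sg, pl, n) ((n) == 1 ? (sg) : (pl))

    static void
    report_ignored_errors(int n_errors, const char *path)
    {
        printf(ngettext("ignored %d error in file \"%s\"\n",
                        "ignored %d errors in file \"%s\"\n",
                        n_errors),
               n_errors, path);
    }

    int
    main(void)
    {
        report_ignored_errors(1, "global.dat");
        report_ignored_errors(3, "global.dat");
        return 0;
    }
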
diff --git a/src/bin/pg_dump/t/001_basic.pl b/src/bin/pg_dump/t/001_basic.pl
index 84ca25e17d6..c3c5fae11ea 100644
--- a/src/bin/pg_dump/t/001_basic.pl
+++ b/src/bin/pg_dump/t/001_basic.pl
@@ -240,17 +240,20 @@ command_fails_like(
command_fails_like(
[ 'pg_restore', '--exclude-database=foo', '--globals-only', '-d', 'xxx' ],
qr/\Qpg_restore: error: option --exclude-database cannot be used together with -g\/--globals-only\E/,
- 'pg_restore: option --exclude-database cannot be used together with -g/--globals-only');
+ 'pg_restore: option --exclude-database cannot be used together with -g/--globals-only'
+);
command_fails_like(
[ 'pg_restore', '--exclude-database=foo', '-d', 'xxx', 'dumpdir' ],
qr/\Qpg_restore: error: option --exclude-database can be used only when restoring an archive created by pg_dumpall\E/,
- 'When option --exclude-database is used in pg_restore with dump of pg_dump');
+ 'When option --exclude-database is used in pg_restore with dump of pg_dump'
+);
command_fails_like(
[ 'pg_restore', '--globals-only', '-d', 'xxx', 'dumpdir' ],
qr/\Qpg_restore: error: option -g\/--globals-only can be used only when restoring an archive created by pg_dumpall\E/,
- 'When option --globals-only is not used in pg_restore with dump of pg_dump');
+ 'When option --globals-only is not used in pg_restore with dump of pg_dump'
+);
# also fails for -r and -t, but it seems pointless to add more tests for those.
command_fails_like(
@@ -261,6 +264,6 @@ command_fails_like(
command_fails_like(
[ 'pg_dumpall', '--format', 'x' ],
- qr/\Qpg_dumpall: error: unrecognized archive format "x";\E/,
- 'pg_dumpall: unrecognized archive format');
+ qr/\Qpg_dumpall: error: unrecognized output format "x";\E/,
+ 'pg_dumpall: unrecognized output format');
done_testing();
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index 55d892d9c16..2485d8f360e 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -68,6 +68,7 @@ my %pgdump_runs = (
'--no-data',
'--sequence-data',
'--binary-upgrade',
+ '--with-statistics',
'--dbname' => 'postgres', # alternative way to specify database
],
restore_cmd => [
@@ -75,6 +76,7 @@ my %pgdump_runs = (
'--format' => 'custom',
'--verbose',
'--file' => "$tempdir/binary_upgrade.sql",
+ '--with-statistics',
"$tempdir/binary_upgrade.dump",
],
},
@@ -88,11 +90,13 @@ my %pgdump_runs = (
'--format' => 'custom',
'--compress' => '1',
'--file' => "$tempdir/compression_gzip_custom.dump",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/compression_gzip_custom.sql",
+ '--with-statistics',
"$tempdir/compression_gzip_custom.dump",
],
command_like => {
@@ -115,6 +119,7 @@ my %pgdump_runs = (
'--format' => 'directory',
'--compress' => 'gzip:1',
'--file' => "$tempdir/compression_gzip_dir",
+ '--with-statistics',
'postgres',
],
# Give coverage for manually compressed blobs.toc files during
@@ -132,6 +137,7 @@ my %pgdump_runs = (
'pg_restore',
'--jobs' => '2',
'--file' => "$tempdir/compression_gzip_dir.sql",
+ '--with-statistics',
"$tempdir/compression_gzip_dir",
],
},
@@ -144,6 +150,7 @@ my %pgdump_runs = (
'--format' => 'plain',
'--compress' => '1',
'--file' => "$tempdir/compression_gzip_plain.sql.gz",
+ '--with-statistics',
'postgres',
],
# Decompress the generated file to run through the tests.
@@ -162,11 +169,13 @@ my %pgdump_runs = (
'--format' => 'custom',
'--compress' => 'lz4',
'--file' => "$tempdir/compression_lz4_custom.dump",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/compression_lz4_custom.sql",
+ '--with-statistics',
"$tempdir/compression_lz4_custom.dump",
],
command_like => {
@@ -189,6 +198,7 @@ my %pgdump_runs = (
'--format' => 'directory',
'--compress' => 'lz4:1',
'--file' => "$tempdir/compression_lz4_dir",
+ '--with-statistics',
'postgres',
],
# Verify that data files were compressed
@@ -200,6 +210,7 @@ my %pgdump_runs = (
'pg_restore',
'--jobs' => '2',
'--file' => "$tempdir/compression_lz4_dir.sql",
+ '--with-statistics',
"$tempdir/compression_lz4_dir",
],
},
@@ -212,6 +223,7 @@ my %pgdump_runs = (
'--format' => 'plain',
'--compress' => 'lz4',
'--file' => "$tempdir/compression_lz4_plain.sql.lz4",
+ '--with-statistics',
'postgres',
],
# Decompress the generated file to run through the tests.
@@ -233,11 +245,13 @@ my %pgdump_runs = (
'--format' => 'custom',
'--compress' => 'zstd',
'--file' => "$tempdir/compression_zstd_custom.dump",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/compression_zstd_custom.sql",
+ '--with-statistics',
"$tempdir/compression_zstd_custom.dump",
],
command_like => {
@@ -259,6 +273,7 @@ my %pgdump_runs = (
'--format' => 'directory',
'--compress' => 'zstd:1',
'--file' => "$tempdir/compression_zstd_dir",
+ '--with-statistics',
'postgres',
],
# Give coverage for manually compressed blobs.toc files during
@@ -279,6 +294,7 @@ my %pgdump_runs = (
'pg_restore',
'--jobs' => '2',
'--file' => "$tempdir/compression_zstd_dir.sql",
+ '--with-statistics',
"$tempdir/compression_zstd_dir",
],
},
@@ -292,6 +308,7 @@ my %pgdump_runs = (
'--format' => 'plain',
'--compress' => 'zstd:long',
'--file' => "$tempdir/compression_zstd_plain.sql.zst",
+ '--with-statistics',
'postgres',
],
# Decompress the generated file to run through the tests.
@@ -310,6 +327,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/clean.sql",
'--clean',
+ '--with-statistics',
'--dbname' => 'postgres', # alternative way to specify database
],
},
@@ -320,6 +338,7 @@ my %pgdump_runs = (
'--clean',
'--if-exists',
'--encoding' => 'UTF8', # no-op, just for testing
+ '--with-statistics',
'postgres',
],
},
@@ -338,6 +357,7 @@ my %pgdump_runs = (
'--create',
'--no-reconnect', # no-op, just for testing
'--verbose',
+ '--with-statistics',
'postgres',
],
},
@@ -348,7 +368,7 @@ my %pgdump_runs = (
'--data-only',
'--superuser' => 'test_superuser',
'--disable-triggers',
- '--verbose', # no-op, just make sure it works
+ '--verbose', # no-op, just make sure it works
'postgres',
],
},
@@ -356,6 +376,7 @@ my %pgdump_runs = (
dump_cmd => [
'pg_dump', '--no-sync',
'--file' => "$tempdir/defaults.sql",
+ '--with-statistics',
'postgres',
],
},
@@ -364,6 +385,7 @@ my %pgdump_runs = (
dump_cmd => [
'pg_dump', '--no-sync',
'--file' => "$tempdir/defaults_no_public.sql",
+ '--with-statistics',
'regress_pg_dump_test',
],
},
@@ -373,6 +395,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--clean',
'--file' => "$tempdir/defaults_no_public_clean.sql",
+ '--with-statistics',
'regress_pg_dump_test',
],
},
@@ -381,6 +404,7 @@ my %pgdump_runs = (
dump_cmd => [
'pg_dump', '--no-sync',
'--file' => "$tempdir/defaults_public_owner.sql",
+ '--with-statistics',
'regress_public_owner',
],
},
@@ -395,12 +419,14 @@ my %pgdump_runs = (
'pg_dump',
'--format' => 'custom',
'--file' => "$tempdir/defaults_custom_format.dump",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--format' => 'custom',
'--file' => "$tempdir/defaults_custom_format.sql",
+ '--with-statistics',
"$tempdir/defaults_custom_format.dump",
],
command_like => {
@@ -425,12 +451,14 @@ my %pgdump_runs = (
'pg_dump',
'--format' => 'directory',
'--file' => "$tempdir/defaults_dir_format",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--format' => 'directory',
'--file' => "$tempdir/defaults_dir_format.sql",
+ '--with-statistics',
"$tempdir/defaults_dir_format",
],
command_like => {
@@ -456,11 +484,13 @@ my %pgdump_runs = (
'--format' => 'directory',
'--jobs' => 2,
'--file' => "$tempdir/defaults_parallel",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/defaults_parallel.sql",
+ '--with-statistics',
"$tempdir/defaults_parallel",
],
},
@@ -472,12 +502,14 @@ my %pgdump_runs = (
'pg_dump',
'--format' => 'tar',
'--file' => "$tempdir/defaults_tar_format.tar",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--format' => 'tar',
'--file' => "$tempdir/defaults_tar_format.sql",
+ '--with-statistics',
"$tempdir/defaults_tar_format.tar",
],
},
@@ -486,6 +518,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/exclude_dump_test_schema.sql",
'--exclude-schema' => 'dump_test',
+ '--with-statistics',
'postgres',
],
},
@@ -494,6 +527,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/exclude_test_table.sql",
'--exclude-table' => 'dump_test.test_table',
+ '--with-statistics',
'postgres',
],
},
@@ -502,6 +536,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/exclude_measurement.sql",
'--exclude-table-and-children' => 'dump_test.measurement',
+ '--with-statistics',
'postgres',
],
},
@@ -511,6 +546,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/exclude_measurement_data.sql",
'--exclude-table-data-and-children' => 'dump_test.measurement',
'--no-unlogged-table-data',
+ '--with-statistics',
'postgres',
],
},
@@ -520,6 +556,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/exclude_test_table_data.sql",
'--exclude-table-data' => 'dump_test.test_table',
'--no-unlogged-table-data',
+ '--with-statistics',
'postgres',
],
},
@@ -538,6 +575,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/pg_dumpall_globals.sql",
'--globals-only',
'--no-sync',
+ '--with-statistics',
],
},
pg_dumpall_globals_clean => {
@@ -547,12 +585,14 @@ my %pgdump_runs = (
'--globals-only',
'--clean',
'--no-sync',
+ '--with-statistics',
],
},
pg_dumpall_dbprivs => {
dump_cmd => [
'pg_dumpall', '--no-sync',
'--file' => "$tempdir/pg_dumpall_dbprivs.sql",
+ '--with-statistics',
],
},
pg_dumpall_exclude => {
@@ -562,6 +602,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/pg_dumpall_exclude.sql",
'--exclude-database' => '*dump_test*',
'--no-sync',
+ '--with-statistics',
],
},
no_toast_compression => {
@@ -569,6 +610,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_toast_compression.sql",
'--no-toast-compression',
+ '--with-statistics',
'postgres',
],
},
@@ -577,6 +619,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_large_objects.sql",
'--no-large-objects',
+ '--with-statistics',
'postgres',
],
},
@@ -585,6 +628,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_policies.sql",
'--no-policies',
+ '--with-statistics',
'postgres',
],
},
@@ -593,6 +637,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_privs.sql",
'--no-privileges',
+ '--with-statistics',
'postgres',
],
},
@@ -601,6 +646,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_owner.sql",
'--no-owner',
+ '--with-statistics',
'postgres',
],
},
@@ -609,6 +655,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_table_access_method.sql",
'--no-table-access-method',
+ '--with-statistics',
'postgres',
],
},
@@ -617,6 +664,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/only_dump_test_schema.sql",
'--schema' => 'dump_test',
+ '--with-statistics',
'postgres',
],
},
@@ -627,6 +675,7 @@ my %pgdump_runs = (
'--table' => 'dump_test.test_table',
'--lock-wait-timeout' =>
(1000 * $PostgreSQL::Test::Utils::timeout_default),
+ '--with-statistics',
'postgres',
],
},
@@ -637,6 +686,7 @@ my %pgdump_runs = (
'--table-and-children' => 'dump_test.measurement',
'--lock-wait-timeout' =>
(1000 * $PostgreSQL::Test::Utils::timeout_default),
+ '--with-statistics',
'postgres',
],
},
@@ -646,6 +696,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/role.sql",
'--role' => 'regress_dump_test_role',
'--schema' => 'dump_test_second_schema',
+ '--with-statistics',
'postgres',
],
},
@@ -658,11 +709,13 @@ my %pgdump_runs = (
'--file' => "$tempdir/role_parallel",
'--role' => 'regress_dump_test_role',
'--schema' => 'dump_test_second_schema',
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/role_parallel.sql",
+ '--with-statistics',
"$tempdir/role_parallel",
],
},
@@ -691,6 +744,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/section_pre_data.sql",
'--section' => 'pre-data',
+ '--with-statistics',
'postgres',
],
},
@@ -699,6 +753,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/section_data.sql",
'--section' => 'data',
+ '--with-statistics',
'postgres',
],
},
@@ -707,6 +762,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/section_post_data.sql",
'--section' => 'post-data',
+ '--with-statistics',
'postgres',
],
},
@@ -717,6 +773,7 @@ my %pgdump_runs = (
'--schema' => 'dump_test',
'--large-objects',
'--no-large-objects',
+ '--with-statistics',
'postgres',
],
},
@@ -732,6 +789,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
"--file=$tempdir/no_data_no_schema.sql", '--no-data',
'--no-schema', 'postgres',
+ '--with-statistics',
],
},
statistics_only => {
@@ -752,7 +810,7 @@ my %pgdump_runs = (
dump_cmd => [
'pg_dump', '--no-sync',
"--file=$tempdir/no_schema.sql", '--no-schema',
- 'postgres',
+ '--with-statistics', 'postgres',
],
},);
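Each entry in %pgdump_runs pairs a dump_cmd with an optional restore_cmd,
and the test harness executes both before matching the resulting output
against the regexps in %tests; threading --with-statistics through both
sides keeps statistics in the round trip. A hypothetical entry in the
same shape (run name and file are illustrative only):

    my %extra_runs = (
        with_stats_only => {
            dump_cmd => [
                'pg_dump', '--no-sync',
                '--file' => "$tempdir/with_stats_only.sql",
                '--with-statistics', 'postgres',
            ],
        },
    );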
@@ -1132,7 +1190,9 @@ my %tests = (
) INHERITS (dump_test.test_table_nn, dump_test.test_table_nn_2);
ALTER TABLE dump_test.test_table_nn ADD CONSTRAINT nn NOT NULL col1 NOT VALID;
ALTER TABLE dump_test.test_table_nn_chld1 VALIDATE CONSTRAINT nn;
- ALTER TABLE dump_test.test_table_nn_chld2 VALIDATE CONSTRAINT nn;',
+ ALTER TABLE dump_test.test_table_nn_chld2 VALIDATE CONSTRAINT nn;
+ COMMENT ON CONSTRAINT nn ON dump_test.test_table_nn IS \'nn comment is valid\';
+ COMMENT ON CONSTRAINT nn ON dump_test.test_table_nn_chld2 IS \'nn_chld2 comment is valid\';',
regexp => qr/^
\QALTER TABLE dump_test.test_table_nn\E \n^\s+
\QADD CONSTRAINT nn NOT NULL col1 NOT VALID;\E
@@ -1146,6 +1206,34 @@ my %tests = (
},
},
+ # This constraint is invalid, so its comment is dumped in SECTION_POST_DATA
+ 'COMMENT ON CONSTRAINT ON test_table_nn' => {
+ regexp => qr/^
+ \QCOMMENT ON CONSTRAINT nn ON dump_test.test_table_nn IS\E
+ /xm,
+ like => {
+ %full_runs, %dump_test_schema_runs, section_post_data => 1,
+ },
+ unlike => {
+ exclude_dump_test_schema => 1,
+ only_dump_measurement => 1,
+ },
+ },
+
+ # This constraint is valid, so its comment is dumped in SECTION_PRE_DATA
+ 'COMMENT ON CONSTRAINT ON test_table_chld2' => {
+ regexp => qr/^
+ \QCOMMENT ON CONSTRAINT nn ON dump_test.test_table_nn_chld2 IS\E
+ /xm,
+ like => {
+ %full_runs, %dump_test_schema_runs, section_pre_data => 1,
+ },
+ unlike => {
+ exclude_dump_test_schema => 1,
+ only_dump_measurement => 1,
+ },
+ },
+
'CONSTRAINT NOT NULL / NOT VALID (child1)' => {
regexp => qr/^
\QCREATE TABLE dump_test.test_table_nn_chld1 (\E\n
@@ -4834,13 +4922,13 @@ my %tests = (
CREATE TABLE dump_test.has_stats
AS SELECT g.g AS x, g.g / 2 AS y FROM generate_series(1,100) AS g(g);
CREATE MATERIALIZED VIEW dump_test.has_stats_mv AS SELECT * FROM dump_test.has_stats;
- CREATE INDEX dup_test_post_data_ix ON dump_test.has_stats(x, (x - 1));
+ CREATE INDEX """dump_test""\'s post-data index" ON dump_test.has_stats(x, (x - 1));
ANALYZE dump_test.has_stats, dump_test.has_stats_mv;',
regexp => qr/^
\QSELECT * FROM pg_catalog.pg_restore_relation_stats(\E\s+
'version',\s'\d+'::integer,\s+
'schemaname',\s'dump_test',\s+
- 'relname',\s'dup_test_post_data_ix',\s+
+ 'relname',\s'"dump_test"''s\ post-data\ index',\s+
'relpages',\s'\d+'::integer,\s+
'reltuples',\s'\d+'::real,\s+
'relallvisible',\s'\d+'::integer,\s+
@@ -4849,7 +4937,7 @@ my %tests = (
\QSELECT * FROM pg_catalog.pg_restore_attribute_stats(\E\s+
'version',\s'\d+'::integer,\s+
'schemaname',\s'dump_test',\s+
- 'relname',\s'dup_test_post_data_ix',\s+
+ 'relname',\s'"dump_test"''s\ post-data\ index',\s+
'attnum',\s'2'::smallint,\s+
'inherited',\s'f'::boolean,\s+
'null_frac',\s'0'::real,\s+
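The renamed index exercises both SQL quoting rules at once: in the
CREATE INDEX statement the name is a double-quoted identifier with its
embedded double-quotes doubled, while pg_restore_relation_stats() and
pg_restore_attribute_stats() receive it as a single-quoted literal with
the single-quote doubled. A quick sketch of the two escapings
(illustrative only, not part of the test):

    # The raw index name, containing both quote characters.
    my $name = q{"dump_test"'s post-data index};
    (my $as_identifier = $name) =~ s/"/""/g;    # CREATE INDEX "..." form
    (my $as_literal    = $name) =~ s/'/''/g;    # 'relname', '...' form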
diff --git a/src/bin/pg_dump/t/006_pg_dumpall.pl b/src/bin/pg_dump/t/006_pg_dumpall.pl
index 5acd49f1559..c274b777586 100644
--- a/src/bin/pg_dump/t/006_pg_dumpall.pl
+++ b/src/bin/pg_dump/t/006_pg_dumpall.pl
@@ -294,17 +294,17 @@ my %pgdumpall_runs = (
'--format' => 'directory',
'--globals-only',
'--file' => "$tempdir/dump_globals_only",
- ],
- restore_cmd => [
- 'pg_restore', '-C', '--globals-only',
- '--format' => 'directory',
- '--file' => "$tempdir/dump_globals_only.sql",
- "$tempdir/dump_globals_only",
- ],
- like => qr/
+ ],
+ restore_cmd => [
+ 'pg_restore', '-C', '--globals-only',
+ '--format' => 'directory',
+ '--file' => "$tempdir/dump_globals_only.sql",
+ "$tempdir/dump_globals_only",
+ ],
+ like => qr/
^\s*\QCREATE ROLE dumpall;\E\s*\n
/xm
- }, );
+ },);
# First execute the setup_sql
foreach my $run (sort keys %pgdumpall_runs)
@@ -339,7 +339,8 @@ foreach my $run (sort keys %pgdumpall_runs)
# pg_restore --file output file.
my $output_file = slurp_file("$tempdir/${run}.sql");
- if (!($pgdumpall_runs{$run}->{like}) && !($pgdumpall_runs{$run}->{unlike}))
+ if ( !($pgdumpall_runs{$run}->{like})
+ && !($pgdumpall_runs{$run}->{unlike}))
{
die "missing \"like\" or \"unlike\" in test \"$run\"";
}
@@ -361,30 +362,38 @@ foreach my $run (sort keys %pgdumpall_runs)
# Some negative test case with dump of pg_dumpall and restore using pg_restore
# test case 1: when -C is not used in pg_restore with dump of pg_dumpall
$node->command_fails_like(
- [ 'pg_restore',
- "$tempdir/format_custom",
- '--format' => 'custom',
- '--file' => "$tempdir/error_test.sql", ],
- qr/\Qpg_restore: error: -C\/--create option should be specified when restoring an archive created by pg_dumpall\E/,
- 'When -C is not used in pg_restore with dump of pg_dumpall');
+ [
+ 'pg_restore',
+ "$tempdir/format_custom",
+ '--format' => 'custom',
+ '--file' => "$tempdir/error_test.sql",
+ ],
+ qr/\Qpg_restore: error: option -C\/--create must be specified when restoring an archive created by pg_dumpall\E/,
+ 'When -C is not used in pg_restore with dump of pg_dumpall');
# test case 2: When --list option is used with dump of pg_dumpall
$node->command_fails_like(
- [ 'pg_restore',
+ [
+ 'pg_restore',
"$tempdir/format_custom", '-C',
- '--format' => 'custom', '--list',
- '--file' => "$tempdir/error_test.sql", ],
+ '--format' => 'custom',
+ '--list',
+ '--file' => "$tempdir/error_test.sql",
+ ],
qr/\Qpg_restore: error: option -l\/--list cannot be used when restoring an archive created by pg_dumpall\E/,
'When --list is used in pg_restore with dump of pg_dumpall');
# test case 3: When non-exist database is given with -d option
$node->command_fails_like(
- [ 'pg_restore',
+ [
+ 'pg_restore',
"$tempdir/format_custom", '-C',
'--format' => 'custom',
- '-d' => 'dbpq', ],
+ '-d' => 'dbpq',
+ ],
qr/\Qpg_restore: error: could not connect to database "dbpq"\E/,
- 'When non-existent database is given with -d option in pg_restore with dump of pg_dumpall');
+ 'When non-existent database is given with -d option in pg_restore with dump of pg_dumpall'
+);
$node->stop('fast');
diff --git a/src/bin/pg_rewind/t/RewindTest.pm b/src/bin/pg_rewind/t/RewindTest.pm
index 3efab831797..b0234ebfaf2 100644
--- a/src/bin/pg_rewind/t/RewindTest.pm
+++ b/src/bin/pg_rewind/t/RewindTest.pm
@@ -285,7 +285,7 @@ sub run_pg_rewind
# Check that pg_rewind with dbname and --write-recovery-conf
# wrote the dbname in the generated primary_conninfo value.
like(slurp_file("$primary_pgdata/postgresql.auto.conf"),
- qr/dbname=postgres/m, 'recovery conf file sets dbname');
+ qr/dbname=postgres/m, 'recovery conf file sets dbname');
# Check that standby.signal is here as recovery configuration
# was requested.
diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c
index 940fc77fc2e..fb063a2de42 100644
--- a/src/bin/pg_upgrade/check.c
+++ b/src/bin/pg_upgrade/check.c
@@ -168,6 +168,7 @@ static DataTypesUsageChecks data_types_usage_checks[] =
/* pg_class.oid is preserved, so 'regclass' is OK */
" 'regcollation', "
" 'regconfig', "
+ /* pg_database.oid is preserved, so 'regdatabase' is OK */
" 'regdictionary', "
" 'regnamespace', "
" 'regoper', "
@@ -885,7 +886,7 @@ check_cluster_versions(void)
*/
if (GET_MAJOR_VERSION(old_cluster.major_version) >= 1800 &&
user_opts.char_signedness != -1)
- pg_fatal("%s option cannot be used to upgrade from PostgreSQL %s and later.",
+ pg_fatal("The option %s cannot be used for upgrades from PostgreSQL %s and later.",
"--set-char-signedness", "18");
check_ok();
@@ -1934,7 +1935,7 @@ check_for_unicode_update(ClusterInfo *cluster)
{
fclose(report.file);
report_status(PG_WARNING, "warning");
- pg_log(PG_WARNING, "Your installation contains relations that may be affected by a new version of Unicode.\n"
+ pg_log(PG_WARNING, "Your installation contains relations that might be affected by a new version of Unicode.\n"
"A list of potentially-affected relations is in the file:\n"
" %s", report.path);
}
diff --git a/src/bin/pg_upgrade/dump.c b/src/bin/pg_upgrade/dump.c
index 23cb08e8347..183f08ce1e8 100644
--- a/src/bin/pg_upgrade/dump.c
+++ b/src/bin/pg_upgrade/dump.c
@@ -58,7 +58,7 @@ generate_old_dump(void)
(user_opts.transfer_mode == TRANSFER_MODE_SWAP) ?
"" : "--sequence-data",
log_opts.verbose ? "--verbose" : "",
- user_opts.do_statistics ? "" : "--no-statistics",
+ user_opts.do_statistics ? "--with-statistics" : "--no-statistics",
log_opts.dumpdir,
sql_file_name, escaped_connstr.data);
diff --git a/src/bin/pg_upgrade/relfilenumber.c b/src/bin/pg_upgrade/relfilenumber.c
index 2959c07f0b8..8d8e816a01f 100644
--- a/src/bin/pg_upgrade/relfilenumber.c
+++ b/src/bin/pg_upgrade/relfilenumber.c
@@ -290,19 +290,19 @@ prepare_for_swap(const char *old_tablespace, Oid db_oid,
/* Create directory for stuff that is moved aside. */
if (pg_mkdir_p(moved_tblspc, pg_dir_create_mode) != 0 && errno != EEXIST)
- pg_fatal("could not create directory \"%s\"", moved_tblspc);
+ pg_fatal("could not create directory \"%s\": %m", moved_tblspc);
/* Create directory for old catalog files. */
if (pg_mkdir_p(old_catalog_dir, pg_dir_create_mode) != 0)
- pg_fatal("could not create directory \"%s\"", old_catalog_dir);
+ pg_fatal("could not create directory \"%s\": %m", old_catalog_dir);
/* Move the new cluster's database directory aside. */
if (rename(new_db_dir, moved_db_dir) != 0)
- pg_fatal("could not rename \"%s\" to \"%s\"", new_db_dir, moved_db_dir);
+ pg_fatal("could not rename directory \"%s\" to \"%s\": %m", new_db_dir, moved_db_dir);
/* Move the old cluster's database directory into place. */
if (rename(old_db_dir, new_db_dir) != 0)
- pg_fatal("could not rename \"%s\" to \"%s\"", old_db_dir, new_db_dir);
+ pg_fatal("could not rename directory \"%s\" to \"%s\": %m", old_db_dir, new_db_dir);
return true;
}
@@ -390,7 +390,7 @@ swap_catalog_files(FileNameMap *maps, int size, const char *old_catalog_dir,
snprintf(dest, sizeof(dest), "%s/%s", old_catalog_dir, de->d_name);
if (rename(path, dest) != 0)
- pg_fatal("could not rename \"%s\" to \"%s\": %m", path, dest);
+ pg_fatal("could not rename file \"%s\" to \"%s\": %m", path, dest);
}
if (errno)
pg_fatal("could not read directory \"%s\": %m", new_db_dir);
@@ -417,7 +417,7 @@ swap_catalog_files(FileNameMap *maps, int size, const char *old_catalog_dir,
snprintf(dest, sizeof(dest), "%s/%s", new_db_dir, de->d_name);
if (rename(path, dest) != 0)
- pg_fatal("could not rename \"%s\" to \"%s\": %m", path, dest);
+ pg_fatal("could not rename file \"%s\" to \"%s\": %m", path, dest);
/*
* We don't fsync() the database files in the file synchronization
diff --git a/src/bin/pg_upgrade/t/004_subscription.pl b/src/bin/pg_upgrade/t/004_subscription.pl
index c545abf6581..e46f02c6cc6 100644
--- a/src/bin/pg_upgrade/t/004_subscription.pl
+++ b/src/bin/pg_upgrade/t/004_subscription.pl
@@ -53,7 +53,8 @@ $old_sub->safe_psql('postgres',
$old_sub->stop;
-$new_sub->append_conf('postgresql.conf', "max_active_replication_origins = 0");
+$new_sub->append_conf('postgresql.conf',
+ "max_active_replication_origins = 0");
# pg_upgrade will fail because the new cluster has insufficient
# max_active_replication_origins.
@@ -80,7 +81,8 @@ command_checks_all(
);
# Reset max_active_replication_origins
-$new_sub->append_conf('postgresql.conf', "max_active_replication_origins = 10");
+$new_sub->append_conf('postgresql.conf',
+ "max_active_replication_origins = 10");
# Cleanup
$publisher->safe_psql('postgres', "DROP PUBLICATION regress_pub1");
diff --git a/src/bin/pg_upgrade/t/005_char_signedness.pl b/src/bin/pg_upgrade/t/005_char_signedness.pl
index 17fa0d48b15..cd8cff6f513 100644
--- a/src/bin/pg_upgrade/t/005_char_signedness.pl
+++ b/src/bin/pg_upgrade/t/005_char_signedness.pl
@@ -65,7 +65,7 @@ command_checks_all(
$mode
],
1,
- [qr/--set-char-signedness option cannot be used/],
+ [qr/option --set-char-signedness cannot be used/],
[],
'--set-char-signedness option cannot be used for upgrading from v18 or later'
);
diff --git a/src/bin/pg_upgrade/t/006_transfer_modes.pl b/src/bin/pg_upgrade/t/006_transfer_modes.pl
index 550a63fdf7d..58fe8a8c7dc 100644
--- a/src/bin/pg_upgrade/t/006_transfer_modes.pl
+++ b/src/bin/pg_upgrade/t/006_transfer_modes.pl
@@ -13,7 +13,8 @@ sub test_mode
{
my ($mode) = @_;
- my $old = PostgreSQL::Test::Cluster->new('old', install_path => $ENV{oldinstall});
+ my $old =
+ PostgreSQL::Test::Cluster->new('old', install_path => $ENV{oldinstall});
my $new = PostgreSQL::Test::Cluster->new('new');
# --swap can't be used to upgrade from versions older than 10, so just skip
@@ -40,9 +41,11 @@ sub test_mode
# Create a small variety of simple test objects on the old cluster. We'll
# check that these reach the new version after upgrading.
$old->start;
- $old->safe_psql('postgres', "CREATE TABLE test1 AS SELECT generate_series(1, 100)");
+ $old->safe_psql('postgres',
+ "CREATE TABLE test1 AS SELECT generate_series(1, 100)");
$old->safe_psql('postgres', "CREATE DATABASE testdb1");
- $old->safe_psql('testdb1', "CREATE TABLE test2 AS SELECT generate_series(200, 300)");
+ $old->safe_psql('testdb1',
+ "CREATE TABLE test2 AS SELECT generate_series(200, 300)");
$old->safe_psql('testdb1', "VACUUM FULL test2");
$old->safe_psql('testdb1', "CREATE SEQUENCE testseq START 5432");
@@ -51,10 +54,15 @@ sub test_mode
if (defined($ENV{oldinstall}))
{
my $tblspc = PostgreSQL::Test::Utils::tempdir_short();
- $old->safe_psql('postgres', "CREATE TABLESPACE test_tblspc LOCATION '$tblspc'");
- $old->safe_psql('postgres', "CREATE DATABASE testdb2 TABLESPACE test_tblspc");
- $old->safe_psql('postgres', "CREATE TABLE test3 TABLESPACE test_tblspc AS SELECT generate_series(300, 401)");
- $old->safe_psql('testdb2', "CREATE TABLE test4 AS SELECT generate_series(400, 502)");
+ $old->safe_psql('postgres',
+ "CREATE TABLESPACE test_tblspc LOCATION '$tblspc'");
+ $old->safe_psql('postgres',
+ "CREATE DATABASE testdb2 TABLESPACE test_tblspc");
+ $old->safe_psql('postgres',
+ "CREATE TABLE test3 TABLESPACE test_tblspc AS SELECT generate_series(300, 401)"
+ );
+ $old->safe_psql('testdb2',
+ "CREATE TABLE test4 AS SELECT generate_series(400, 502)");
}
$old->stop;
@@ -90,9 +98,11 @@ sub test_mode
# tablespace.
if (defined($ENV{oldinstall}))
{
- $result = $new->safe_psql('postgres', "SELECT COUNT(*) FROM test3");
+ $result =
+ $new->safe_psql('postgres', "SELECT COUNT(*) FROM test3");
is($result, '102', "test3 data after pg_upgrade $mode");
- $result = $new->safe_psql('testdb2', "SELECT COUNT(*) FROM test4");
+ $result =
+ $new->safe_psql('testdb2', "SELECT COUNT(*) FROM test4");
is($result, '103', "test4 data after pg_upgrade $mode");
}
$new->stop;
diff --git a/src/bin/pg_upgrade/task.c b/src/bin/pg_upgrade/task.c
index a48d5691390..ee0e2457152 100644
--- a/src/bin/pg_upgrade/task.c
+++ b/src/bin/pg_upgrade/task.c
@@ -192,8 +192,7 @@ start_conn(const ClusterInfo *cluster, UpgradeTaskSlot *slot)
slot->conn = PQconnectStart(conn_opts.data);
if (!slot->conn)
- pg_fatal("failed to create connection with connection string: \"%s\"",
- conn_opts.data);
+ pg_fatal("out of memory");
termPQExpBuffer(&conn_opts);
}
@@ -402,7 +401,7 @@ wait_on_slots(UpgradeTaskSlot *slots, int numslots)
* If we found socket(s) to wait on, wait.
*/
if (select_loop(maxFd, &input, &output) == -1)
- pg_fatal("select() failed: %m");
+ pg_fatal("%s() failed: %m", "select");
/*
* Mark which sockets appear to be ready.
diff --git a/src/bin/pg_verifybackup/meson.build b/src/bin/pg_verifybackup/meson.build
index 9567d55500d..f45ea790d8e 100644
--- a/src/bin/pg_verifybackup/meson.build
+++ b/src/bin/pg_verifybackup/meson.build
@@ -23,10 +23,10 @@ tests += {
'sd': meson.current_source_dir(),
'bd': meson.current_build_dir(),
'tap': {
- 'env': {'GZIP_PROGRAM': gzip.found() ? gzip.path() : '',
- 'TAR': tar.found() ? tar.path() : '',
- 'LZ4': program_lz4.found() ? program_lz4.path() : '',
- 'ZSTD': program_zstd.found() ? program_zstd.path() : ''},
+ 'env': {'GZIP_PROGRAM': gzip.found() ? gzip.full_path() : '',
+ 'TAR': tar.found() ? tar.full_path() : '',
+ 'LZ4': program_lz4.found() ? program_lz4.full_path() : '',
+ 'ZSTD': program_zstd.found() ? program_zstd.full_path() : ''},
'tests': [
't/001_basic.pl',
't/002_algorithm.pl',
diff --git a/src/bin/pg_verifybackup/t/008_untar.pl b/src/bin/pg_verifybackup/t/008_untar.pl
index deed3ec247d..bc3d6b352ad 100644
--- a/src/bin/pg_verifybackup/t/008_untar.pl
+++ b/src/bin/pg_verifybackup/t/008_untar.pl
@@ -16,6 +16,22 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
$primary->init(allows_streaming => 1);
$primary->start;
+# Create a file with some random data of an arbitrary size, useful to
+# check the robustness of the compression and decompression logic.  A
+# size of around 640kB has proven large enough to detect some issues
+# related to LZ4, yet small enough not to noticeably impact the
+# runtime of the test.
+my $junk_data = $primary->safe_psql(
+ 'postgres', qq(
+ SELECT string_agg(encode(sha256(i::bytea), 'hex'), '')
+ FROM generate_series(1, 10240) s(i);));
+my $data_dir = $primary->data_dir;
+my $junk_file = "$data_dir/junk";
+open my $jf, '>', $junk_file
+ or die "Could not create junk file: $!";
+print $jf $junk_data;
+close $jf;
+
# Create a tablespace directory.
my $source_ts_path = PostgreSQL::Test::Utils::tempdir_short();
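The 640kB figure follows from the query itself: each sha256() digest
encodes to 64 hex characters, and string_agg() concatenates 10240 of
them, giving exactly 655,360 bytes. A back-of-the-envelope check
(hypothetical, not part of the test):

    # 10240 digests x 64 hex chars each = 655,360 bytes (640 KiB).
    my $expected_size = 10240 * 64;
    is(length($junk_data), $expected_size, 'junk data is 640kB');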
@@ -53,6 +69,12 @@ my @test_configuration = (
'enabled' => check_pg_config("#define USE_LZ4 1")
},
{
+ 'compression_method' => 'lz4',
+ 'backup_flags' => [ '--compress', 'server-lz4:5' ],
+ 'backup_archive' => [ 'base.tar.lz4', "$tsoid.tar.lz4" ],
+ 'enabled' => check_pg_config("#define USE_LZ4 1")
+ },
+ {
'compression_method' => 'zstd',
'backup_flags' => [ '--compress', 'server-zstd' ],
'backup_archive' => [ 'base.tar.zst', "$tsoid.tar.zst" ],
diff --git a/src/bin/pg_verifybackup/t/010_client_untar.pl b/src/bin/pg_verifybackup/t/010_client_untar.pl
index d8d2b06c7ee..b62faeb5acf 100644
--- a/src/bin/pg_verifybackup/t/010_client_untar.pl
+++ b/src/bin/pg_verifybackup/t/010_client_untar.pl
@@ -15,6 +15,22 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
$primary->init(allows_streaming => 1);
$primary->start;
+# Create a file with some random data of an arbitrary size, useful to
+# check the robustness of the compression and decompression logic.  A
+# size of around 640kB has proven large enough to detect some issues
+# related to LZ4, yet small enough not to noticeably impact the
+# runtime of the test.
+my $junk_data = $primary->safe_psql(
+ 'postgres', qq(
+ SELECT string_agg(encode(sha256(i::bytea), 'hex'), '')
+ FROM generate_series(1, 10240) s(i);));
+my $data_dir = $primary->data_dir;
+my $junk_file = "$data_dir/junk";
+open my $jf, '>', $junk_file
+ or die "Could not create junk file: $!";
+print $jf $junk_data;
+close $jf;
+
my $backup_path = $primary->backup_dir . '/client-backup';
my $extract_path = $primary->backup_dir . '/extracted-backup';
@@ -38,6 +54,12 @@ my @test_configuration = (
'enabled' => check_pg_config("#define USE_LZ4 1")
},
{
+ 'compression_method' => 'lz4',
+ 'backup_flags' => [ '--compress', 'client-lz4:1' ],
+ 'backup_archive' => 'base.tar.lz4',
+ 'enabled' => check_pg_config("#define USE_LZ4 1")
+ },
+ {
'compression_method' => 'zstd',
'backup_flags' => [ '--compress', 'client-zstd:5' ],
'backup_archive' => 'base.tar.zst',
diff --git a/src/bin/pgbench/t/002_pgbench_no_server.pl b/src/bin/pgbench/t/002_pgbench_no_server.pl
index f975c73dd75..2cc59cc8140 100644
--- a/src/bin/pgbench/t/002_pgbench_no_server.pl
+++ b/src/bin/pgbench/t/002_pgbench_no_server.pl
@@ -233,21 +233,9 @@ for my $o (@options)
'pgbench option error: ' . $name);
}
-# Help
-pgbench(
- '--help', 0,
- [
- qr{benchmarking tool for PostgreSQL},
- qr{Usage},
- qr{Initialization options:},
- qr{Common options:},
- qr{Report bugs to}
- ],
- [qr{^$}],
- 'pgbench help');
-
-# Version
-pgbench('-V', 0, [qr{^pgbench .PostgreSQL. }], [qr{^$}], 'pgbench version');
+program_help_ok('pgbench');
+program_version_ok('pgbench');
+program_options_handling_ok('pgbench');
# list of builtins
pgbench(
diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c
index 81a5ba844ba..9fcd2db8326 100644
--- a/src/bin/psql/command.c
+++ b/src/bin/psql/command.c
@@ -67,8 +67,8 @@ static backslashResult exec_command_C(PsqlScanState scan_state, bool active_bran
static backslashResult exec_command_connect(PsqlScanState scan_state, bool active_branch);
static backslashResult exec_command_cd(PsqlScanState scan_state, bool active_branch,
const char *cmd);
-static backslashResult exec_command_close(PsqlScanState scan_state, bool active_branch,
- const char *cmd);
+static backslashResult exec_command_close_prepared(PsqlScanState scan_state,
+ bool active_branch, const char *cmd);
static backslashResult exec_command_conninfo(PsqlScanState scan_state, bool active_branch);
static backslashResult exec_command_copy(PsqlScanState scan_state, bool active_branch);
static backslashResult exec_command_copyright(PsqlScanState scan_state, bool active_branch);
@@ -330,8 +330,8 @@ exec_command(const char *cmd,
status = exec_command_connect(scan_state, active_branch);
else if (strcmp(cmd, "cd") == 0)
status = exec_command_cd(scan_state, active_branch, cmd);
- else if (strcmp(cmd, "close") == 0)
- status = exec_command_close(scan_state, active_branch, cmd);
+ else if (strcmp(cmd, "close_prepared") == 0)
+ status = exec_command_close_prepared(scan_state, active_branch, cmd);
else if (strcmp(cmd, "conninfo") == 0)
status = exec_command_conninfo(scan_state, active_branch);
else if (pg_strcasecmp(cmd, "copy") == 0)
@@ -728,10 +728,10 @@ exec_command_cd(PsqlScanState scan_state, bool active_branch, const char *cmd)
}
/*
- * \close -- close a previously prepared statement
+ * \close_prepared -- close a previously prepared statement
*/
static backslashResult
-exec_command_close(PsqlScanState scan_state, bool active_branch, const char *cmd)
+exec_command_close_prepared(PsqlScanState scan_state, bool active_branch, const char *cmd)
{
backslashResult status = PSQL_CMD_SKIP_LINE;
@@ -778,6 +778,7 @@ exec_command_conninfo(PsqlScanState scan_state, bool active_branch)
int ssl_in_use,
password_used,
gssapi_used;
+ int version_num;
char *paramval;
if (!active_branch)
@@ -793,7 +794,9 @@ exec_command_conninfo(PsqlScanState scan_state, bool active_branch)
/* Get values for the parameters */
host = PQhost(pset.db);
hostaddr = PQhostaddr(pset.db);
- protocol_version = psprintf("%d", PQprotocolVersion(pset.db));
+ version_num = PQfullProtocolVersion(pset.db);
+ protocol_version = psprintf("%d.%d", version_num / 10000,
+ version_num % 10000);
ssl_in_use = PQsslInUse(pset.db);
password_used = PQconnectionUsedPassword(pset.db);
gssapi_used = PQconnectionUsedGSSAPI(pset.db);
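PQfullProtocolVersion() encodes the protocol as major * 10000 + minor,
so the division and modulo above split 30002 back into "3.2" (whereas
the older PQprotocolVersion() only reported the major version). The
same arithmetic in Perl, with an assumed sample value:

    my $version_num = 30002;    # protocol 3.2, as encoded by libpq
    printf "%d.%d\n", int($version_num / 10000), $version_num % 10000;   # 3.2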
@@ -874,11 +877,11 @@ exec_command_conninfo(PsqlScanState scan_state, bool active_branch)
printTableAddCell(&cont, _("Backend PID"), false, false);
printTableAddCell(&cont, backend_pid, false, false);
- /* TLS Connection */
- printTableAddCell(&cont, _("TLS Connection"), false, false);
+ /* SSL Connection */
+ printTableAddCell(&cont, _("SSL Connection"), false, false);
printTableAddCell(&cont, ssl_in_use ? _("true") : _("false"), false, false);
- /* TLS Information */
+ /* SSL Information */
if (ssl_in_use)
{
char *library,
@@ -895,19 +898,19 @@ exec_command_conninfo(PsqlScanState scan_state, bool active_branch)
compression = (char *) PQsslAttribute(pset.db, "compression");
alpn = (char *) PQsslAttribute(pset.db, "alpn");
- printTableAddCell(&cont, _("TLS Library"), false, false);
+ printTableAddCell(&cont, _("SSL Library"), false, false);
printTableAddCell(&cont, library ? library : _("unknown"), false, false);
- printTableAddCell(&cont, _("TLS Protocol"), false, false);
+ printTableAddCell(&cont, _("SSL Protocol"), false, false);
printTableAddCell(&cont, protocol ? protocol : _("unknown"), false, false);
- printTableAddCell(&cont, _("TLS Key Bits"), false, false);
+ printTableAddCell(&cont, _("SSL Key Bits"), false, false);
printTableAddCell(&cont, key_bits ? key_bits : _("unknown"), false, false);
- printTableAddCell(&cont, _("TLS Cipher"), false, false);
+ printTableAddCell(&cont, _("SSL Cipher"), false, false);
printTableAddCell(&cont, cipher ? cipher : _("unknown"), false, false);
- printTableAddCell(&cont, _("TLS Compression"), false, false);
+ printTableAddCell(&cont, _("SSL Compression"), false, false);
printTableAddCell(&cont, (compression && strcmp(compression, "off") != 0) ?
_("true") : _("false"), false, false);
@@ -1946,7 +1949,7 @@ exec_command_gexec(PsqlScanState scan_state, bool active_branch)
{
if (PQpipelineStatus(pset.db) != PQ_PIPELINE_OFF)
{
- pg_log_error("\\gexec not allowed in pipeline mode");
+ pg_log_error("\\%s not allowed in pipeline mode", "gexec");
clean_extended_state();
return PSQL_CMD_ERROR;
}
@@ -1972,7 +1975,7 @@ exec_command_gset(PsqlScanState scan_state, bool active_branch)
if (PQpipelineStatus(pset.db) != PQ_PIPELINE_OFF)
{
- pg_log_error("\\gset not allowed in pipeline mode");
+ pg_log_error("\\%s not allowed in pipeline mode", "gset");
clean_extended_state();
return PSQL_CMD_ERROR;
}
@@ -3284,7 +3287,7 @@ exec_command_watch(PsqlScanState scan_state, bool active_branch,
if (PQpipelineStatus(pset.db) != PQ_PIPELINE_OFF)
{
- pg_log_error("\\watch not allowed in pipeline mode");
+ pg_log_error("\\%s not allowed in pipeline mode", "watch");
clean_extended_state();
success = false;
}
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index 3e4e444f3fd..d2c0a49c46c 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -1867,6 +1867,33 @@ ExecQueryAndProcessResults(const char *query,
{
FILE *copy_stream = NULL;
+ if (PQpipelineStatus(pset.db) != PQ_PIPELINE_OFF)
+ {
+ /*
+ * Running COPY within a pipeline can break protocol
+ * synchronization in multiple ways, and psql is poorly
+ * placed to track the resulting state.
+ *
+ * While in COPY mode, the backend ignores additional
+ * Sync messages and will not send the matching ReadyForQuery
+ * expected by the frontend.
+ *
+ * Additionally, libpq automatically sends a Sync with the
+ * Copy message, creating an unexpected synchronization point.
+ * A failure during COPY would leave the pipeline in an
+ * aborted state while the backend would be in a clean state,
+ * ready to process commands.
+ *
+ * Fixing these issues would require changes to how libpq
+ * handles pipelines and COPY.  Hence, for the time being,
+ * we forbid the use of COPY within a pipeline, aborting the
+ * connection to avoid leaving psql in an inconsistent state
+ * when a COPY command is attempted.
+ */
+ pg_log_info("COPY in a pipeline is not supported, aborting connection");
+ exit(EXIT_BADCONN);
+ }
+
/*
* For COPY OUT, direct the output to the default place (probably
* a pager pipe) for \watch, or to pset.copyStream for \copy,
@@ -2601,7 +2628,7 @@ clean_extended_state(void)
switch (pset.send_mode)
{
- case PSQL_SEND_EXTENDED_CLOSE: /* \close */
+ case PSQL_SEND_EXTENDED_CLOSE: /* \close_prepared */
free(pset.stmtName);
break;
case PSQL_SEND_EXTENDED_PARSE: /* \parse */
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index 1d08268393e..dd25d2fe7b8 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -296,6 +296,7 @@ describeFunctions(const char *functypes, const char *func_pattern,
char **arg_patterns, int num_arg_patterns,
bool verbose, bool showSystem)
{
+ const char *df_options = "anptwSx+";
bool showAggregate = strchr(functypes, 'a') != NULL;
bool showNormal = strchr(functypes, 'n') != NULL;
bool showProcedure = strchr(functypes, 'p') != NULL;
@@ -310,9 +311,9 @@ describeFunctions(const char *functypes, const char *func_pattern,
/* No "Parallel" column before 9.6 */
static const bool translate_columns_pre_96[] = {false, false, false, false, true, true, false, true, true, false, false, false, false};
- if (strlen(functypes) != strspn(functypes, "anptwSx+"))
+ if (strlen(functypes) != strspn(functypes, df_options))
{
- pg_log_error("\\df only takes [anptwSx+] as options");
+ pg_log_error("\\df only takes [%s] as options", df_options);
return true;
}
@@ -6188,8 +6189,8 @@ listExtensions(const char *pattern)
"FROM pg_catalog.pg_extension e "
"LEFT JOIN pg_catalog.pg_namespace n ON n.oid = e.extnamespace "
"LEFT JOIN pg_catalog.pg_description d ON d.objoid = e.oid "
- "LEFT JOIN pg_catalog.pg_available_extensions() ae(name, default_version, comment) ON ae.name = e.extname "
- "AND d.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass\n",
+ "AND d.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass "
+ "LEFT JOIN pg_catalog.pg_available_extensions() ae(name, default_version, comment) ON ae.name = e.extname\n",
gettext_noop("Name"),
gettext_noop("Version"),
gettext_noop("Default version"),
diff --git a/src/bin/psql/help.c b/src/bin/psql/help.c
index 403b51325a7..a2e009ab9be 100644
--- a/src/bin/psql/help.c
+++ b/src/bin/psql/help.c
@@ -252,7 +252,8 @@ slashUsage(unsigned short int pager)
HELP0(" \\dO[Sx+] [PATTERN] list collations\n");
HELP0(" \\dp[Sx] [PATTERN] list table, view, and sequence access privileges\n");
HELP0(" \\dP[itnx+] [PATTERN] list [only index/table] partitioned relations [n=nested]\n");
- HELP0(" \\drds[x] [ROLEPTRN [DBPTRN]] list per-database role settings\n");
+ HELP0(" \\drds[x] [ROLEPTRN [DBPTRN]]\n"
+ " list per-database role settings\n");
HELP0(" \\drg[Sx] [PATTERN] list role grants\n");
HELP0(" \\dRp[x+] [PATTERN] list replication publications\n");
HELP0(" \\dRs[x+] [PATTERN] list replication subscriptions\n");
@@ -330,12 +331,12 @@ slashUsage(unsigned short int pager)
HELP0(" \\bind [PARAM]... set query parameters\n");
HELP0(" \\bind_named STMT_NAME [PARAM]...\n"
" set query parameters for an existing prepared statement\n");
- HELP0(" \\close STMT_NAME close an existing prepared statement\n");
+ HELP0(" \\close_prepared STMT_NAME\n"
+ " close an existing prepared statement\n");
HELP0(" \\endpipeline exit pipeline mode\n");
HELP0(" \\flush flush output data to the server\n");
HELP0(" \\flushrequest send request to the server to flush its output buffer\n");
- HELP0(" \\getresults [NUM_RES] read NUM_RES pending results. All pending results are\n"
- " read if no argument is provided\n");
+ HELP0(" \\getresults [NUM_RES] read NUM_RES pending results, or all if no argument\n");
HELP0(" \\parse STMT_NAME create a prepared statement\n");
HELP0(" \\sendpipeline send an extended query to an ongoing pipeline\n");
HELP0(" \\startpipeline enter pipeline mode\n");
@@ -463,8 +464,9 @@ helpVariables(unsigned short int pager)
" VERSION_NAME\n"
" VERSION_NUM\n"
" psql's version (in verbose string, short string, or numeric format)\n");
- HELP0(" WATCH_INTERVAL\n"
- " if set to a number, overrides the default two second \\watch interval\n");
+ HELPN(" WATCH_INTERVAL\n"
+ " number of seconds \\watch waits between executions (default %s)\n",
+ DEFAULT_WATCH_INTERVAL);
HELP0("\nDisplay settings:\n");
HELP0("Usage:\n");
diff --git a/src/bin/psql/t/001_basic.pl b/src/bin/psql/t/001_basic.pl
index 4050f9a5e3e..f42c3961e09 100644
--- a/src/bin/psql/t/001_basic.pl
+++ b/src/bin/psql/t/001_basic.pl
@@ -483,8 +483,8 @@ psql_like($node, "copy (values ('foo'),('bar')) to stdout \\g | $pipe_cmd",
my $c4 = slurp_file($g_file);
like($c4, qr/foo.*bar/s);
-# Tests with pipelines. These trigger FATAL failures in the backend,
-# so they cannot be tested via SQL.
+# Test COPY within pipelines.  These abort the connection from
+# the frontend, so they cannot be tested via SQL.
$node->safe_psql('postgres', 'CREATE TABLE psql_pipeline()');
my $log_location = -s $node->logfile;
psql_fails_like(
@@ -493,35 +493,41 @@ psql_fails_like(
COPY psql_pipeline FROM STDIN;
SELECT 'val1';
\\syncpipeline
-\\getresults
\\endpipeline},
- qr/server closed the connection unexpectedly/,
- 'protocol sync loss in pipeline: direct COPY, SELECT, sync and getresult'
-);
+ qr/COPY in a pipeline is not supported, aborting connection/,
+ 'COPY FROM in pipeline: fails');
$node->wait_for_log(
qr/FATAL: .*terminating connection because protocol synchronization was lost/,
$log_location);
+# This time, test without \syncpipeline.
psql_fails_like(
$node,
qq{\\startpipeline
-COPY psql_pipeline FROM STDIN \\bind \\sendpipeline
-SELECT 'val1' \\bind \\sendpipeline
-\\syncpipeline
-\\getresults
+COPY psql_pipeline TO STDOUT;
+SELECT 'val1';
\\endpipeline},
- qr/server closed the connection unexpectedly/,
- 'protocol sync loss in pipeline: bind COPY, SELECT, sync and getresult');
+ qr/COPY in a pipeline is not supported, aborting connection/,
+ 'COPY TO in pipeline: fails');
-# This time, test without the \getresults.
psql_fails_like(
$node,
qq{\\startpipeline
-COPY psql_pipeline FROM STDIN;
+\\copy psql_pipeline from stdin;
SELECT 'val1';
\\syncpipeline
\\endpipeline},
- qr/server closed the connection unexpectedly/,
- 'protocol sync loss in pipeline: COPY, SELECT and sync');
+ qr/COPY in a pipeline is not supported, aborting connection/,
+ '\copy from in pipeline: fails');
+
+# Sync attempt after a COPY TO/FROM.
+psql_fails_like(
+ $node,
+ qq{\\startpipeline
+\\copy psql_pipeline to stdout;
+\\syncpipeline
+\\endpipeline},
+ qr/COPY in a pipeline is not supported, aborting connection/,
+ '\copy to in pipeline: fails');
done_testing();
diff --git a/src/bin/psql/tab-complete.in.c b/src/bin/psql/tab-complete.in.c
index ec65ab79fec..53e7d35fe98 100644
--- a/src/bin/psql/tab-complete.in.c
+++ b/src/bin/psql/tab-complete.in.c
@@ -889,6 +889,14 @@ static const SchemaQuery Query_for_list_of_analyzables = {
.result = "c.relname",
};
+/*
+ * Relations supporting COPY TO/FROM are currently almost the same as
+ * those supporting ANALYZE. Although views with INSTEAD OF INSERT triggers
+ * can be used with COPY FROM, they are rarely used for this purpose,
+ * so plain views are intentionally excluded from this tab completion.
+ */
+#define Query_for_list_of_tables_for_copy Query_for_list_of_analyzables
+
/* Relations supporting index creation */
static const SchemaQuery Query_for_list_of_indexables = {
.catname = "pg_catalog.pg_class c",
@@ -1875,7 +1883,7 @@ psql_completion(const char *text, int start, int end)
static const char *const backslash_commands[] = {
"\\a",
"\\bind", "\\bind_named",
- "\\connect", "\\conninfo", "\\C", "\\cd", "\\close", "\\copy",
+ "\\connect", "\\conninfo", "\\C", "\\cd", "\\close_prepared", "\\copy",
"\\copyright", "\\crosstabview",
"\\d", "\\da", "\\dA", "\\dAc", "\\dAf", "\\dAo", "\\dAp",
"\\db", "\\dc", "\\dconfig", "\\dC", "\\dd", "\\ddp", "\\dD",
@@ -2725,17 +2733,24 @@ match_previous_words(int pattern_id,
/* ALTER TABLE xxx ADD */
else if (Matches("ALTER", "TABLE", MatchAny, "ADD"))
{
- /* make sure to keep this list and the !Matches() below in sync */
- COMPLETE_WITH("COLUMN", "CONSTRAINT", "CHECK", "UNIQUE", "PRIMARY KEY",
- "EXCLUDE", "FOREIGN KEY");
+ /*
+ * Make sure to keep this list and the MatchAnyExcept() below in sync.
+ */
+ COMPLETE_WITH("COLUMN", "CONSTRAINT", "CHECK (", "NOT NULL", "UNIQUE",
+ "PRIMARY KEY", "EXCLUDE", "FOREIGN KEY");
}
/* ALTER TABLE xxx ADD [COLUMN] yyy */
else if (Matches("ALTER", "TABLE", MatchAny, "ADD", "COLUMN", MatchAny) ||
- Matches("ALTER", "TABLE", MatchAny, "ADD", MatchAnyExcept("COLUMN|CONSTRAINT|CHECK|UNIQUE|PRIMARY|EXCLUDE|FOREIGN")))
+ Matches("ALTER", "TABLE", MatchAny, "ADD", MatchAnyExcept("COLUMN|CONSTRAINT|CHECK|UNIQUE|PRIMARY|NOT|EXCLUDE|FOREIGN")))
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_datatypes);
/* ALTER TABLE xxx ADD CONSTRAINT yyy */
else if (Matches("ALTER", "TABLE", MatchAny, "ADD", "CONSTRAINT", MatchAny))
- COMPLETE_WITH("CHECK", "UNIQUE", "PRIMARY KEY", "EXCLUDE", "FOREIGN KEY");
+ COMPLETE_WITH("CHECK (", "NOT NULL", "UNIQUE", "PRIMARY KEY", "EXCLUDE", "FOREIGN KEY");
+ /* ALTER TABLE xxx ADD NOT NULL */
+ else if (Matches("ALTER", "TABLE", MatchAny, "ADD", "NOT", "NULL"))
+ COMPLETE_WITH_ATTR(prev4_wd);
+ else if (Matches("ALTER", "TABLE", MatchAny, "ADD", "CONSTRAINT", MatchAny, "NOT", "NULL"))
+ COMPLETE_WITH_ATTR(prev6_wd);
/* ALTER TABLE xxx ADD [CONSTRAINT yyy] (PRIMARY KEY|UNIQUE) */
else if (Matches("ALTER", "TABLE", MatchAny, "ADD", "PRIMARY", "KEY") ||
Matches("ALTER", "TABLE", MatchAny, "ADD", "UNIQUE") ||
@@ -3255,7 +3270,7 @@ match_previous_words(int pattern_id,
* backslash command).
*/
else if (Matches("COPY|\\copy"))
- COMPLETE_WITH_SCHEMA_QUERY_PLUS(Query_for_list_of_tables, "(");
+ COMPLETE_WITH_SCHEMA_QUERY_PLUS(Query_for_list_of_tables_for_copy, "(");
/* Complete COPY ( with legal query commands */
else if (Matches("COPY|\\copy", "("))
COMPLETE_WITH("SELECT", "TABLE", "VALUES", "INSERT INTO", "UPDATE", "DELETE FROM", "MERGE INTO", "WITH");
@@ -3289,7 +3304,7 @@ match_previous_words(int pattern_id,
COMPLETE_WITH("FORMAT", "FREEZE", "DELIMITER", "NULL",
"HEADER", "QUOTE", "ESCAPE", "FORCE_QUOTE",
"FORCE_NOT_NULL", "FORCE_NULL", "ENCODING", "DEFAULT",
- "ON_ERROR", "LOG_VERBOSITY");
+ "ON_ERROR", "LOG_VERBOSITY", "REJECT_LIMIT");
/* Complete COPY <sth> FROM|TO filename WITH (FORMAT */
else if (Matches("COPY|\\copy", MatchAny, "FROM|TO", MatchAny, "WITH", "(", "FORMAT"))
@@ -3664,9 +3679,10 @@ match_previous_words(int pattern_id,
TailMatches("CREATE", "TEMP|TEMPORARY|UNLOGGED", "TABLE", MatchAny, "(*)", "AS"))
COMPLETE_WITH("EXECUTE", "SELECT", "TABLE", "VALUES", "WITH");
/* Complete CREATE TABLE name (...) with supported options */
- else if (TailMatches("CREATE", "TABLE", MatchAny, "(*)") ||
- TailMatches("CREATE", "UNLOGGED", "TABLE", MatchAny, "(*)"))
+ else if (TailMatches("CREATE", "TABLE", MatchAny, "(*)"))
COMPLETE_WITH("AS", "INHERITS (", "PARTITION BY", "USING", "TABLESPACE", "WITH (");
+ else if (TailMatches("CREATE", "UNLOGGED", "TABLE", MatchAny, "(*)"))
+ COMPLETE_WITH("AS", "INHERITS (", "USING", "TABLESPACE", "WITH (");
else if (TailMatches("CREATE", "TEMP|TEMPORARY", "TABLE", MatchAny, "(*)"))
COMPLETE_WITH("AS", "INHERITS (", "ON COMMIT", "PARTITION BY", "USING",
"TABLESPACE", "WITH (");
diff --git a/src/bin/psql/variables.c b/src/bin/psql/variables.c
index ae2d0e5ed3f..6b64302ebca 100644
--- a/src/bin/psql/variables.c
+++ b/src/bin/psql/variables.c
@@ -204,7 +204,7 @@ ParseVariableDouble(const char *value, const char *name, double *result, double
if ((value == NULL) || (*value == '\0'))
{
if (name)
- pg_log_error("invalid input syntax for \"%s\"", name);
+ pg_log_error("invalid input syntax for variable \"%s\"", name);
return false;
}
@@ -215,14 +215,14 @@ ParseVariableDouble(const char *value, const char *name, double *result, double
if (dblval < min)
{
if (name)
- pg_log_error("invalid value \"%s\" for \"%s\": must be greater than %.2f",
+ pg_log_error("invalid value \"%s\" for variable \"%s\": must be greater than %.2f",
value, name, min);
return false;
}
else if (dblval > max)
{
if (name)
- pg_log_error("invalid value \"%s\" for \"%s\": must be less than %.2f",
+ pg_log_error("invalid value \"%s\" for variable \"%s\": must be less than %.2f",
value, name, max);
}
*result = dblval;
@@ -238,13 +238,13 @@ ParseVariableDouble(const char *value, const char *name, double *result, double
(dblval == 0.0 || dblval >= HUGE_VAL || dblval <= -HUGE_VAL))
{
if (name)
- pg_log_error("\"%s\" is out of range for \"%s\"", value, name);
+ pg_log_error("value \"%s\" is out of range for variable \"%s\"", value, name);
return false;
}
else
{
if (name)
- pg_log_error("invalid value \"%s\" for \"%s\"", value, name);
+ pg_log_error("invalid value \"%s\" for variable \"%s\"", value, name);
return false;
}
}
diff --git a/src/bin/scripts/t/100_vacuumdb.pl b/src/bin/scripts/t/100_vacuumdb.pl
index 75ac24a7a55..ff56a13b46b 100644
--- a/src/bin/scripts/t/100_vacuumdb.pl
+++ b/src/bin/scripts/t/100_vacuumdb.pl
@@ -238,62 +238,105 @@ $node->command_fails_like(
'cannot use option --all and a dbname as argument at the same time');
$node->safe_psql('postgres',
- 'CREATE TABLE regression_vacuumdb_test AS select generate_series(1, 10) a, generate_series(2, 11) b;');
+ 'CREATE TABLE regression_vacuumdb_test AS select generate_series(1, 10) a, generate_series(2, 11) b;'
+);
$node->issues_sql_like(
- [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-only',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with missing stats');
$node->issues_sql_unlike(
- [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-only',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with no missing stats');
$node->safe_psql('postgres',
- 'CREATE INDEX regression_vacuumdb_test_idx ON regression_vacuumdb_test (mod(a, 2));');
+ 'CREATE INDEX regression_vacuumdb_test_idx ON regression_vacuumdb_test (mod(a, 2));'
+);
$node->issues_sql_like(
- [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-in-stages',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with missing index expression stats');
$node->issues_sql_unlike(
- [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-in-stages',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with no missing index expression stats');
$node->safe_psql('postgres',
- 'CREATE STATISTICS regression_vacuumdb_test_stat ON a, b FROM regression_vacuumdb_test;');
+ 'CREATE STATISTICS regression_vacuumdb_test_stat ON a, b FROM regression_vacuumdb_test;'
+);
$node->issues_sql_like(
- [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-only',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with missing extended stats');
$node->issues_sql_unlike(
- [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-only',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with no missing extended stats');
$node->safe_psql('postgres',
"CREATE TABLE regression_vacuumdb_child (a INT) INHERITS (regression_vacuumdb_test);\n"
- . "INSERT INTO regression_vacuumdb_child VALUES (1, 2);\n"
- . "ANALYZE regression_vacuumdb_child;\n");
+ . "INSERT INTO regression_vacuumdb_child VALUES (1, 2);\n"
+ . "ANALYZE regression_vacuumdb_child;\n");
$node->issues_sql_like(
- [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-in-stages',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with missing inherited stats');
$node->issues_sql_unlike(
- [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-in-stages',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with no missing inherited stats');
$node->safe_psql('postgres',
"CREATE TABLE regression_vacuumdb_parted (a INT) PARTITION BY LIST (a);\n"
- . "CREATE TABLE regression_vacuumdb_part1 PARTITION OF regression_vacuumdb_parted FOR VALUES IN (1);\n"
- . "INSERT INTO regression_vacuumdb_parted VALUES (1);\n"
- . "ANALYZE regression_vacuumdb_part1;\n");
+ . "CREATE TABLE regression_vacuumdb_part1 PARTITION OF regression_vacuumdb_parted FOR VALUES IN (1);\n"
+ . "INSERT INTO regression_vacuumdb_parted VALUES (1);\n"
+ . "ANALYZE regression_vacuumdb_part1;\n");
$node->issues_sql_like(
- [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_parted', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-only',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_parted', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with missing partition stats');
$node->issues_sql_unlike(
- [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_parted', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-only',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_parted', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with no missing partition stats');