-rw-r--r--  config/check_modules.pl | 5
-rw-r--r--  contrib/amcheck/verify_heapam.c | 6
-rw-r--r--  contrib/basebackup_to_shell/basebackup_to_shell.c | 27
-rw-r--r--  contrib/basebackup_to_shell/t/001_basic.pl | 71
-rw-r--r--  contrib/basic_archive/basic_archive.c | 51
-rw-r--r--  contrib/btree_gist/btree_bool.c | 4
-rw-r--r--  contrib/hstore_plpython/hstore_plpython.c | 4
-rw-r--r--  contrib/pageinspect/brinfuncs.c | 24
-rw-r--r--  contrib/pageinspect/gistfuncs.c | 48
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements.c | 5
-rw-r--r--  contrib/pg_walinspect/pg_walinspect.c | 60
-rw-r--r--  contrib/pgcrypto/openssl.c | 6
-rw-r--r--  contrib/pgstattuple/pgstattuple.c | 68
-rw-r--r--  contrib/postgres_fdw/connection.c | 10
-rw-r--r--  contrib/postgres_fdw/postgres_fdw.c | 10
-rw-r--r--  contrib/test_decoding/test_decoding.c | 8
-rw-r--r--  src/backend/access/common/toast_internals.c | 6
-rw-r--r--  src/backend/access/heap/pruneheap.c | 12
-rw-r--r--  src/backend/access/heap/vacuumlazy.c | 14
-rw-r--r--  src/backend/access/rmgrdesc/xactdesc.c | 4
-rw-r--r--  src/backend/access/rmgrdesc/xlogdesc.c | 2
-rw-r--r--  src/backend/access/transam/rmgr.c | 6
-rw-r--r--  src/backend/access/transam/twophase.c | 6
-rw-r--r--  src/backend/access/transam/xlog.c | 28
-rw-r--r--  src/backend/access/transam/xlogarchive.c | 16
-rw-r--r--  src/backend/access/transam/xlogfuncs.c | 8
-rw-r--r--  src/backend/access/transam/xlogrecovery.c | 15
-rw-r--r--  src/backend/access/transam/xlogstats.c | 10
-rw-r--r--  src/backend/access/transam/xlogutils.c | 13
-rw-r--r--  src/backend/catalog/Catalog.pm | 23
-rw-r--r--  src/backend/catalog/genbki.pl | 12
-rw-r--r--  src/backend/catalog/heap.c | 11
-rw-r--r--  src/backend/catalog/index.c | 6
-rw-r--r--  src/backend/catalog/objectaccess.c | 30
-rw-r--r--  src/backend/catalog/pg_constraint.c | 6
-rw-r--r--  src/backend/catalog/pg_publication.c | 12
-rw-r--r--  src/backend/catalog/storage.c | 12
-rw-r--r--  src/backend/commands/analyze.c | 10
-rw-r--r--  src/backend/commands/collationcmds.c | 5
-rw-r--r--  src/backend/commands/copy.c | 6
-rw-r--r--  src/backend/commands/copyfromparse.c | 3
-rw-r--r--  src/backend/commands/copyto.c | 6
-rw-r--r--  src/backend/commands/dbcommands.c | 64
-rw-r--r--  src/backend/commands/explain.c | 2
-rw-r--r--  src/backend/commands/extension.c | 8
-rw-r--r--  src/backend/commands/matview.c | 4
-rw-r--r--  src/backend/commands/publicationcmds.c | 47
-rw-r--r--  src/backend/commands/statscmds.c | 15
-rw-r--r--  src/backend/commands/subscriptioncmds.c | 14
-rw-r--r--  src/backend/commands/tablecmds.c | 28
-rw-r--r--  src/backend/commands/tablespace.c | 2
-rw-r--r--  src/backend/commands/user.c | 4
-rw-r--r--  src/backend/commands/vacuum.c | 4
-rw-r--r--  src/backend/commands/vacuumparallel.c | 2
-rw-r--r--  src/backend/executor/execExpr.c | 4
-rw-r--r--  src/backend/executor/execExprInterp.c | 65
-rw-r--r--  src/backend/executor/nodeIndexscan.c | 1
-rw-r--r--  src/backend/executor/nodeMemoize.c | 2
-rw-r--r--  src/backend/executor/nodeModifyTable.c | 2
-rw-r--r--  src/backend/executor/spi.c | 16
-rw-r--r--  src/backend/jit/llvm/llvmjit.c | 4
-rw-r--r--  src/backend/lib/dshash.c | 6
-rw-r--r--  src/backend/libpq/pqcomm.c | 4
-rw-r--r--  src/backend/nodes/copyfuncs.c | 24
-rw-r--r--  src/backend/nodes/equalfuncs.c | 3
-rw-r--r--  src/backend/nodes/nodeFuncs.c | 15
-rw-r--r--  src/backend/nodes/outfuncs.c | 8
-rw-r--r--  src/backend/nodes/value.c | 4
-rw-r--r--  src/backend/optimizer/path/allpaths.c | 15
-rw-r--r--  src/backend/optimizer/path/costsize.c | 111
-rw-r--r--  src/backend/optimizer/path/equivclass.c | 6
-rw-r--r--  src/backend/optimizer/path/joinpath.c | 2
-rw-r--r--  src/backend/optimizer/path/pathkeys.c | 143
-rw-r--r--  src/backend/optimizer/plan/createplan.c | 4
-rw-r--r--  src/backend/optimizer/plan/planner.c | 66
-rw-r--r--  src/backend/optimizer/util/clauses.c | 8
-rw-r--r--  src/backend/optimizer/util/plancat.c | 164
-rw-r--r--  src/backend/parser/analyze.c | 13
-rw-r--r--  src/backend/parser/parse_clause.c | 2
-rw-r--r--  src/backend/parser/parse_collate.c | 7
-rw-r--r--  src/backend/parser/parse_expr.c | 85
-rw-r--r--  src/backend/parser/parse_jsontable.c | 74
-rw-r--r--  src/backend/parser/parse_node.c | 83
-rw-r--r--  src/backend/parser/parse_param.c | 4
-rw-r--r--  src/backend/parser/parse_relation.c | 2
-rw-r--r--  src/backend/partitioning/partdesc.c | 4
-rw-r--r--  src/backend/postmaster/autovacuum.c | 15
-rw-r--r--  src/backend/postmaster/bgworker.c | 6
-rw-r--r--  src/backend/postmaster/pgarch.c | 44
-rw-r--r--  src/backend/postmaster/postmaster.c | 4
-rw-r--r--  src/backend/postmaster/startup.c | 2
-rw-r--r--  src/backend/postmaster/walwriter.c | 6
-rw-r--r--  src/backend/regex/regc_pg_locale.c | 10
-rw-r--r--  src/backend/replication/backup_manifest.c | 2
-rw-r--r--  src/backend/replication/basebackup_copy.c | 12
-rw-r--r--  src/backend/replication/basebackup_gzip.c | 35
-rw-r--r--  src/backend/replication/basebackup_lz4.c | 2
-rw-r--r--  src/backend/replication/basebackup_server.c | 18
-rw-r--r--  src/backend/replication/basebackup_target.c | 12
-rw-r--r--  src/backend/replication/basebackup_zstd.c | 6
-rw-r--r--  src/backend/replication/logical/decode.c | 2
-rw-r--r--  src/backend/replication/logical/launcher.c | 6
-rw-r--r--  src/backend/replication/logical/reorderbuffer.c | 4
-rw-r--r--  src/backend/replication/logical/tablesync.c | 34
-rw-r--r--  src/backend/replication/logical/worker.c | 4
-rw-r--r--  src/backend/replication/pgoutput/pgoutput.c | 60
-rw-r--r--  src/backend/replication/slot.c | 4
-rw-r--r--  src/backend/replication/walreceiver.c | 6
-rw-r--r--  src/backend/replication/walsender.c | 6
-rw-r--r--  src/backend/statistics/dependencies.c | 2
-rw-r--r--  src/backend/storage/buffer/bufmgr.c | 33
-rw-r--r--  src/backend/storage/ipc/procarray.c | 4
-rw-r--r--  src/backend/storage/ipc/shm_mq.c | 6
-rw-r--r--  src/backend/storage/ipc/sinvaladt.c | 9
-rw-r--r--  src/backend/storage/page/bufpage.c | 2
-rw-r--r--  src/backend/tcop/postgres.c | 24
-rw-r--r--  src/backend/utils/adt/arrayfuncs.c | 3
-rw-r--r--  src/backend/utils/adt/dbsize.c | 10
-rw-r--r--  src/backend/utils/adt/formatting.c | 16
-rw-r--r--  src/backend/utils/adt/json.c | 25
-rw-r--r--  src/backend/utils/adt/jsonb.c | 12
-rw-r--r--  src/backend/utils/adt/jsonb_util.c | 3
-rw-r--r--  src/backend/utils/adt/jsonfuncs.c | 7
-rw-r--r--  src/backend/utils/adt/jsonpath.c | 10
-rw-r--r--  src/backend/utils/adt/jsonpath_exec.c | 36
-rw-r--r--  src/backend/utils/adt/like.c | 4
-rw-r--r--  src/backend/utils/adt/multirangetypes.c | 3
-rw-r--r--  src/backend/utils/adt/numeric.c | 215
-rw-r--r--  src/backend/utils/adt/pg_locale.c | 4
-rw-r--r--  src/backend/utils/adt/pgstatfuncs.c | 2
-rw-r--r--  src/backend/utils/adt/rangetypes_spgist.c | 4
-rw-r--r--  src/backend/utils/adt/ri_triggers.c | 27
-rw-r--r--  src/backend/utils/adt/ruleutils.c | 50
-rw-r--r--  src/backend/utils/adt/selfuncs.c | 12
-rw-r--r--  src/backend/utils/adt/timestamp.c | 227
-rw-r--r--  src/backend/utils/adt/uuid.c | 4
-rw-r--r--  src/backend/utils/adt/varchar.c | 4
-rw-r--r--  src/backend/utils/adt/varlena.c | 8
-rw-r--r--  src/backend/utils/cache/plancache.c | 8
-rw-r--r--  src/backend/utils/cache/relcache.c | 23
-rw-r--r--  src/backend/utils/cache/relmapper.c | 16
-rw-r--r--  src/backend/utils/init/postinit.c | 16
-rw-r--r--  src/backend/utils/misc/queryjumble.c | 2
-rw-r--r--  src/backend/utils/sort/tuplesort.c | 18
-rw-r--r--  src/bin/initdb/initdb.c | 4
-rw-r--r--  src/bin/initdb/t/001_initdb.pl | 36
-rw-r--r--  src/bin/pg_amcheck/pg_amcheck.c | 10
-rw-r--r--  src/bin/pg_amcheck/t/002_nonesuch.pl | 35
-rw-r--r--  src/bin/pg_amcheck/t/005_opclass_damage.pl | 3
-rw-r--r--  src/bin/pg_basebackup/bbstreamer_file.c | 4
-rw-r--r--  src/bin/pg_basebackup/bbstreamer_gzip.c | 27
-rw-r--r--  src/bin/pg_basebackup/bbstreamer_lz4.c | 116
-rw-r--r--  src/bin/pg_basebackup/pg_basebackup.c | 89
-rw-r--r--  src/bin/pg_basebackup/streamutil.c | 2
-rw-r--r--  src/bin/pg_basebackup/t/010_pg_basebackup.pl | 104
-rw-r--r--  src/bin/pg_basebackup/t/020_pg_receivewal.pl | 4
-rw-r--r--  src/bin/pg_basebackup/t/030_pg_recvlogical.pl | 17
-rw-r--r--  src/bin/pg_ctl/pg_ctl.c | 6
-rw-r--r--  src/bin/pg_ctl/t/002_status.pl | 2
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.c | 8
-rw-r--r--  src/bin/pg_dump/pg_backup_custom.c | 10
-rw-r--r--  src/bin/pg_dump/pg_dump.c | 19
-rw-r--r--  src/bin/pg_dump/pg_dumpall.c | 2
-rw-r--r--  src/bin/pg_dump/t/001_basic.pl | 2
-rw-r--r--  src/bin/pg_dump/t/002_pg_dump.pl | 46
-rw-r--r--  src/bin/pg_dump/t/003_pg_dump_with_server.pl | 2
-rw-r--r--  src/bin/pg_dump/t/010_dump_connstr.pl | 9
-rw-r--r--  src/bin/pg_rewind/filemap.c | 6
-rw-r--r--  src/bin/pg_rewind/t/004_pg_xlog_symlink.pl | 3
-rw-r--r--  src/bin/pg_rewind/t/009_growing_files.pl | 5
-rw-r--r--  src/bin/pg_rewind/t/RewindTest.pm | 13
-rw-r--r--  src/bin/pg_upgrade/t/002_pg_upgrade.pl | 5
-rw-r--r--  src/bin/pg_upgrade/util.c | 1
-rw-r--r--  src/bin/pg_verifybackup/t/003_corruption.pl | 2
-rw-r--r--  src/bin/pg_verifybackup/t/004_options.pl | 3
-rw-r--r--  src/bin/pg_verifybackup/t/005_bad_manifest.pl | 6
-rw-r--r--  src/bin/pg_verifybackup/t/007_wal.pl | 6
-rw-r--r--  src/bin/pg_verifybackup/t/008_untar.pl | 75
-rw-r--r--  src/bin/pg_verifybackup/t/009_extract.pl | 46
-rw-r--r--  src/bin/pg_verifybackup/t/010_client_untar.pl | 91
-rw-r--r--  src/bin/pg_waldump/pg_waldump.c | 2
-rw-r--r--  src/bin/pgbench/pgbench.c | 172
-rw-r--r--  src/bin/pgbench/t/001_pgbench_with_server.pl | 36
-rw-r--r--  src/bin/pgbench/t/002_pgbench_no_server.pl | 6
-rw-r--r--  src/bin/psql/common.c | 59
-rw-r--r--  src/bin/psql/describe.c | 10
-rw-r--r--  src/bin/psql/t/001_basic.pl | 57
-rw-r--r--  src/bin/psql/t/010_tab_completion.pl | 27
-rw-r--r--  src/bin/psql/t/020_cancel.pl | 29
-rw-r--r--  src/bin/psql/tab-complete.c | 3
-rw-r--r--  src/bin/scripts/t/020_createdb.pl | 14
-rw-r--r--  src/common/compression.c | 4
-rw-r--r--  src/common/cryptohash_openssl.c | 1
-rw-r--r--  src/common/exec.c | 2
-rw-r--r--  src/include/access/amapi.h | 2
-rw-r--r--  src/include/access/heapam.h | 2
-rw-r--r--  src/include/access/rmgr.h | 2
-rw-r--r--  src/include/access/xact.h | 11
-rw-r--r--  src/include/access/xlogstats.h | 12
-rw-r--r--  src/include/access/xlogutils.h | 2
-rw-r--r--  src/include/catalog/objectaccess.h | 14
-rw-r--r--  src/include/catalog/pg_aggregate.dat | 3
-rw-r--r--  src/include/catalog/pg_class.h | 2
-rw-r--r--  src/include/catalog/pg_collation.h | 2
-rw-r--r--  src/include/catalog/pg_database.dat | 3
-rw-r--r--  src/include/catalog/pg_parameter_acl.h | 2
-rw-r--r--  src/include/catalog/pg_proc.dat | 43
-rw-r--r--  src/include/catalog/pg_publication.h | 4
-rw-r--r--  src/include/catalog/pg_statistic_ext_data.h | 2
-rwxr-xr-x  src/include/catalog/renumber_oids.pl | 14
-rw-r--r--  src/include/commands/publicationcmds.h | 4
-rw-r--r--  src/include/executor/execExpr.h | 32
-rw-r--r--  src/include/executor/executor.h | 2
-rw-r--r--  src/include/fmgr.h | 2
-rw-r--r--  src/include/nodes/execnodes.h | 4
-rw-r--r--  src/include/nodes/nodes.h | 3
-rw-r--r--  src/include/nodes/parsenodes.h | 72
-rw-r--r--  src/include/nodes/pathnodes.h | 46
-rw-r--r--  src/include/nodes/plannodes.h | 6
-rw-r--r--  src/include/nodes/primnodes.h | 43
-rw-r--r--  src/include/optimizer/paths.h | 6
-rw-r--r--  src/include/parser/analyze.h | 2
-rw-r--r--  src/include/parser/parse_param.h | 4
-rw-r--r--  src/include/port.h | 2
-rw-r--r--  src/include/postmaster/pgarch.h | 6
-rw-r--r--  src/include/replication/basebackup_target.h | 2
-rw-r--r--  src/include/replication/decode.h | 2
-rw-r--r--  src/include/replication/slot.h | 2
-rw-r--r--  src/include/storage/latch.h | 2
-rw-r--r--  src/include/tcop/tcopprot.h | 6
-rw-r--r--  src/include/utils/formatting.h | 2
-rw-r--r--  src/include/utils/jsonpath.h | 10
-rw-r--r--  src/include/utils/rel.h | 4
-rw-r--r--  src/include/utils/relmapper.h | 2
-rw-r--r--  src/include/utils/selfuncs.h | 6
-rw-r--r--  src/include/utils/sortsupport.h | 6
-rw-r--r--  src/interfaces/libpq/fe-auth.c | 8
-rw-r--r--  src/interfaces/libpq/fe-secure-common.c | 1
-rw-r--r--  src/interfaces/libpq/t/002_api.pl | 6
-rw-r--r--  src/test/icu/t/010_database.pl | 24
-rw-r--r--  src/test/ldap/t/001_auth.pl | 3
-rw-r--r--  src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl | 3
-rw-r--r--  src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl | 3
-rw-r--r--  src/test/modules/test_misc/t/002_tablespace.pl | 34
-rw-r--r--  src/test/modules/test_oat_hooks/test_oat_hooks.c | 1797
-rw-r--r--  src/test/modules/test_pg_dump/t/001_base.pl | 2
-rw-r--r--  src/test/perl/PostgreSQL/Test/Cluster.pm | 106
-rw-r--r--  src/test/perl/PostgreSQL/Test/SimpleTee.pm | 8
-rw-r--r--  src/test/perl/PostgreSQL/Test/Utils.pm | 17
-rw-r--r--  src/test/perl/PostgreSQL/Version.pm | 16
-rw-r--r--  src/test/recovery/t/001_stream_rep.pl | 3
-rw-r--r--  src/test/recovery/t/002_archiving.pl | 2
-rw-r--r--  src/test/recovery/t/006_logical_decoding.pl | 80
-rw-r--r--  src/test/recovery/t/013_crash_restart.pl | 20
-rw-r--r--  src/test/recovery/t/014_unlogged_reinit.pl | 3
-rw-r--r--  src/test/recovery/t/019_replslot_limit.pl | 10
-rw-r--r--  src/test/recovery/t/022_crash_temp_files.pl | 26
-rw-r--r--  src/test/recovery/t/027_stream_regress.pl | 54
-rw-r--r--  src/test/recovery/t/029_stats_restart.pl | 3
-rw-r--r--  src/test/recovery/t/031_recovery_conflict.pl | 9
-rw-r--r--  src/test/recovery/t/032_relfilenode_reuse.pl | 82
-rw-r--r--  src/test/regress/regress.c | 6
-rw-r--r--  src/test/ssl/t/001_ssltests.pl | 105
-rw-r--r--  src/test/ssl/t/002_scram.pl | 11
-rw-r--r--  src/test/ssl/t/003_sslinfo.pl | 103
-rw-r--r--  src/test/ssl/t/SSL/Backend/OpenSSL.pm | 13
-rw-r--r--  src/test/ssl/t/SSL/Server.pm | 29
-rw-r--r--  src/test/subscription/t/001_rep_changes.pl | 22
-rw-r--r--  src/test/subscription/t/007_ddl.pl | 12
-rw-r--r--  src/test/subscription/t/013_partition.pl | 42
-rw-r--r--  src/test/subscription/t/021_twophase.pl | 142
-rw-r--r--  src/test/subscription/t/022_twophase_cascade.pl | 209
-rw-r--r--  src/test/subscription/t/023_twophase_stream.pl | 132
-rw-r--r--  src/test/subscription/t/024_add_drop_pub.pl | 3
-rw-r--r--  src/test/subscription/t/025_rep_changes_for_schema.pl | 25
-rw-r--r--  src/test/subscription/t/027_nosuperuser.pl | 167
-rw-r--r--  src/test/subscription/t/028_row_filter.pl | 11
-rw-r--r--  src/test/subscription/t/031_column_list.pl | 486
-rw-r--r--  src/tools/PerfectHash.pm | 1
-rw-r--r--  src/tools/ci/windows_build_config.pl | 6
-rwxr-xr-x  src/tools/mark_pgdllimport.pl | 16
-rw-r--r--  src/tools/msvc/MSBuildProject.pm | 2
-rw-r--r--  src/tools/msvc/Mkvcbuild.pm | 53
-rw-r--r--  src/tools/msvc/Project.pm | 10
-rw-r--r--  src/tools/msvc/Solution.pm | 2
-rw-r--r--  src/tools/msvc/vcregress.pl | 8
-rw-r--r--  src/tools/pgindent/typedefs.list | 188
287 files changed, 5195 insertions, 3551 deletions
diff --git a/config/check_modules.pl b/config/check_modules.pl
index 470c3e9c144..611f3a673fd 100644
--- a/config/check_modules.pl
+++ b/config/check_modules.pl
@@ -21,8 +21,7 @@ diag("Test::More::VERSION: $Test::More::VERSION");
diag("Time::HiRes::VERSION: $Time::HiRes::VERSION");
# Check that if prove is using msys perl it is for an msys target
-ok(($ENV{__CONFIG_HOST_OS__} || "") eq 'msys',
- "Msys perl used for correct target")
- if $Config{osname} eq 'msys';
+ok( ($ENV{__CONFIG_HOST_OS__} || "") eq 'msys',
+ "Msys perl used for correct target") if $Config{osname} eq 'msys';
ok(1);
done_testing();
diff --git a/contrib/amcheck/verify_heapam.c b/contrib/amcheck/verify_heapam.c
index e5f7355dcb8..c875f3e5a2a 100644
--- a/contrib/amcheck/verify_heapam.c
+++ b/contrib/amcheck/verify_heapam.c
@@ -1402,17 +1402,17 @@ check_tuple_attribute(HeapCheckContext *ctx)
cmid = TOAST_COMPRESS_METHOD(&toast_pointer);
switch (cmid)
{
- /* List of all valid compression method IDs */
+ /* List of all valid compression method IDs */
case TOAST_PGLZ_COMPRESSION_ID:
case TOAST_LZ4_COMPRESSION_ID:
valid = true;
break;
- /* Recognized but invalid compression method ID */
+ /* Recognized but invalid compression method ID */
case TOAST_INVALID_COMPRESSION_ID:
break;
- /* Intentionally no default here */
+ /* Intentionally no default here */
}
if (!valid)
report_corruption(ctx,
diff --git a/contrib/basebackup_to_shell/basebackup_to_shell.c b/contrib/basebackup_to_shell/basebackup_to_shell.c
index a279219966d..bc754b177af 100644
--- a/contrib/basebackup_to_shell/basebackup_to_shell.c
+++ b/contrib/basebackup_to_shell/basebackup_to_shell.c
@@ -37,13 +37,13 @@ typedef struct bbsink_shell
FILE *pipe;
} bbsink_shell;
-void _PG_init(void);
+void _PG_init(void);
static void *shell_check_detail(char *target, char *target_detail);
static bbsink *shell_get_sink(bbsink *next_sink, void *detail_arg);
static void bbsink_shell_begin_archive(bbsink *sink,
- const char *archive_name);
+ const char *archive_name);
static void bbsink_shell_archive_contents(bbsink *sink, size_t len);
static void bbsink_shell_end_archive(bbsink *sink);
static void bbsink_shell_begin_manifest(bbsink *sink);
@@ -101,7 +101,7 @@ shell_check_detail(char *target, char *target_detail)
{
if (shell_required_role[0] != '\0')
{
- Oid roleid;
+ Oid roleid;
StartTransactionCommand();
roleid = get_role_oid(shell_required_role, true);
@@ -125,8 +125,8 @@ static bbsink *
shell_get_sink(bbsink *next_sink, void *detail_arg)
{
bbsink_shell *sink;
- bool has_detail_escape = false;
- char *c;
+ bool has_detail_escape = false;
+ char *c;
/*
* Set up the bbsink.
@@ -171,15 +171,15 @@ shell_get_sink(bbsink *next_sink, void *detail_arg)
/*
* Since we're passing the string provided by the user to popen(), it will
* be interpreted by the shell, which is a potential security
- * vulnerability, since the user invoking this module is not necessarily
- * a superuser. To stay out of trouble, we must disallow any shell
+ * vulnerability, since the user invoking this module is not necessarily a
+ * superuser. To stay out of trouble, we must disallow any shell
* metacharacters here; to be conservative and keep things simple, we
* allow only alphanumerics.
*/
if (sink->target_detail != NULL)
{
- char *d;
- bool scary = false;
+ char *d;
+ bool scary = false;
for (d = sink->target_detail; *d != '\0'; ++d)
{
@@ -210,7 +210,7 @@ static char *
shell_construct_command(char *base_command, const char *filename,
char *target_detail)
{
- StringInfoData buf;
+ StringInfoData buf;
char *c;
initStringInfo(&buf);
@@ -271,7 +271,7 @@ shell_construct_command(char *base_command, const char *filename,
static void
shell_finish_command(bbsink_shell *sink)
{
- int pclose_rc;
+ int pclose_rc;
/* There should be a command running. */
Assert(sink->current_command != NULL);
@@ -335,9 +335,8 @@ shell_send_data(bbsink_shell *sink, size_t len)
{
/*
* The error we're about to throw would shut down the command
- * anyway, but we may get a more meaningful error message by
- * doing this. If not, we'll fall through to the generic error
- * below.
+ * anyway, but we may get a more meaningful error message by doing
+ * this. If not, we'll fall through to the generic error below.
*/
shell_finish_command(sink);
errno = EPIPE;
diff --git a/contrib/basebackup_to_shell/t/001_basic.pl b/contrib/basebackup_to_shell/t/001_basic.pl
index 350d42079a7..acb66eb9a84 100644
--- a/contrib/basebackup_to_shell/t/001_basic.pl
+++ b/contrib/basebackup_to_shell/t/001_basic.pl
@@ -20,11 +20,12 @@ my $node = PostgreSQL::Test::Cluster->new('primary');
# Make sure pg_hba.conf is set up to allow connections from backupuser.
# This is only needed on Windows machines that don't use UNIX sockets.
-$node->init('allows_streaming' => 1,
- 'auth_extra' => [ '--create-role', 'backupuser' ]);
+$node->init(
+ 'allows_streaming' => 1,
+ 'auth_extra' => [ '--create-role', 'backupuser' ]);
$node->append_conf('postgresql.conf',
- "shared_preload_libraries = 'basebackup_to_shell'");
+ "shared_preload_libraries = 'basebackup_to_shell'");
$node->start;
$node->safe_psql('postgres', 'CREATE USER backupuser REPLICATION');
$node->safe_psql('postgres', 'CREATE ROLE trustworthy');
@@ -41,61 +42,61 @@ my @pg_basebackup_cmd = (@pg_basebackup_defs, '-U', 'backupuser', '-Xfetch');
# Can't use this module without setting basebackup_to_shell.command.
$node->command_fails_like(
- [ @pg_basebackup_cmd, '--target', 'shell' ],
+ [ @pg_basebackup_cmd, '--target', 'shell' ],
qr/shell command for backup is not configured/,
'fails if basebackup_to_shell.command is not set');
# Configure basebackup_to_shell.command and reload the configuation file.
-my $backup_path = PostgreSQL::Test::Utils::tempdir;
+my $backup_path = PostgreSQL::Test::Utils::tempdir;
my $escaped_backup_path = $backup_path;
-$escaped_backup_path =~ s{\\}{\\\\}g if ($PostgreSQL::Test::Utils::windows_os);
+$escaped_backup_path =~ s{\\}{\\\\}g
+ if ($PostgreSQL::Test::Utils::windows_os);
my $shell_command =
- $PostgreSQL::Test::Utils::windows_os
- ? qq{$gzip --fast > "$escaped_backup_path\\\\%f.gz"}
- : qq{$gzip --fast > "$escaped_backup_path/%f.gz"};
+ $PostgreSQL::Test::Utils::windows_os
+ ? qq{$gzip --fast > "$escaped_backup_path\\\\%f.gz"}
+ : qq{$gzip --fast > "$escaped_backup_path/%f.gz"};
$node->append_conf('postgresql.conf',
- "basebackup_to_shell.command='$shell_command'");
+ "basebackup_to_shell.command='$shell_command'");
$node->reload();
# Should work now.
$node->command_ok(
- [ @pg_basebackup_cmd, '--target', 'shell' ],
+ [ @pg_basebackup_cmd, '--target', 'shell' ],
'backup with no detail: pg_basebackup');
verify_backup('', $backup_path, "backup with no detail");
# Should fail with a detail.
$node->command_fails_like(
- [ @pg_basebackup_cmd, '--target', 'shell:foo' ],
+ [ @pg_basebackup_cmd, '--target', 'shell:foo' ],
qr/a target detail is not permitted because the configured command does not include %d/,
'fails if detail provided without %d');
# Reconfigure to restrict access and require a detail.
$shell_command =
- $PostgreSQL::Test::Utils::windows_os
- ? qq{$gzip --fast > "$escaped_backup_path\\\\%d.%f.gz"}
- : qq{$gzip --fast > "$escaped_backup_path/%d.%f.gz"};
+ $PostgreSQL::Test::Utils::windows_os
+ ? qq{$gzip --fast > "$escaped_backup_path\\\\%d.%f.gz"}
+ : qq{$gzip --fast > "$escaped_backup_path/%d.%f.gz"};
$node->append_conf('postgresql.conf',
- "basebackup_to_shell.command='$shell_command'");
+ "basebackup_to_shell.command='$shell_command'");
$node->append_conf('postgresql.conf',
- "basebackup_to_shell.required_role='trustworthy'");
+ "basebackup_to_shell.required_role='trustworthy'");
$node->reload();
# Should fail due to lack of permission.
$node->command_fails_like(
- [ @pg_basebackup_cmd, '--target', 'shell' ],
+ [ @pg_basebackup_cmd, '--target', 'shell' ],
qr/permission denied to use basebackup_to_shell/,
'fails if required_role not granted');
# Should fail due to lack of a detail.
$node->safe_psql('postgres', 'GRANT trustworthy TO backupuser');
$node->command_fails_like(
- [ @pg_basebackup_cmd, '--target', 'shell' ],
+ [ @pg_basebackup_cmd, '--target', 'shell' ],
qr/a target detail is required because the configured command includes %d/,
'fails if %d is present and detail not given');
# Should work.
-$node->command_ok(
- [ @pg_basebackup_cmd, '--target', 'shell:bar' ],
+$node->command_ok([ @pg_basebackup_cmd, '--target', 'shell:bar' ],
'backup with detail: pg_basebackup');
verify_backup('bar.', $backup_path, "backup with detail");
@@ -105,30 +106,34 @@ sub verify_backup
{
my ($prefix, $backup_dir, $test_name) = @_;
- ok(-f "$backup_dir/${prefix}backup_manifest.gz",
- "$test_name: backup_manifest.gz was created");
- ok(-f "$backup_dir/${prefix}base.tar.gz",
- "$test_name: base.tar.gz was created");
+ ok( -f "$backup_dir/${prefix}backup_manifest.gz",
+ "$test_name: backup_manifest.gz was created");
+ ok( -f "$backup_dir/${prefix}base.tar.gz",
+ "$test_name: base.tar.gz was created");
- SKIP: {
+ SKIP:
+ {
my $tar = $ENV{TAR};
skip "no tar program available", 1 if (!defined $tar || $tar eq '');
# Decompress.
system_or_bail($gzip, '-d',
- $backup_dir . '/' . $prefix . 'backup_manifest.gz');
+ $backup_dir . '/' . $prefix . 'backup_manifest.gz');
system_or_bail($gzip, '-d',
- $backup_dir . '/' . $prefix . 'base.tar.gz');
+ $backup_dir . '/' . $prefix . 'base.tar.gz');
# Untar.
my $extract_path = PostgreSQL::Test::Utils::tempdir;
system_or_bail($tar, 'xf', $backup_dir . '/' . $prefix . 'base.tar',
- '-C', $extract_path);
+ '-C', $extract_path);
# Verify.
- $node->command_ok([ 'pg_verifybackup', '-n',
- '-m', "${backup_dir}/${prefix}backup_manifest",
- '-e', $extract_path ],
- "$test_name: backup verifies ok");
+ $node->command_ok(
+ [
+ 'pg_verifybackup', '-n',
+ '-m', "${backup_dir}/${prefix}backup_manifest",
+ '-e', $extract_path
+ ],
+ "$test_name: backup verifies ok");
}
}
diff --git a/contrib/basic_archive/basic_archive.c b/contrib/basic_archive/basic_archive.c
index e7efbfb9c34..c2173533228 100644
--- a/contrib/basic_archive/basic_archive.c
+++ b/contrib/basic_archive/basic_archive.c
@@ -40,8 +40,8 @@
PG_MODULE_MAGIC;
-void _PG_init(void);
-void _PG_archive_module_init(ArchiveModuleCallbacks *cb);
+void _PG_init(void);
+void _PG_archive_module_init(ArchiveModuleCallbacks *cb);
static char *archive_directory = NULL;
static MemoryContext basic_archive_context;
@@ -102,8 +102,8 @@ check_archive_directory(char **newval, void **extra, GucSource source)
/*
* The default value is an empty string, so we have to accept that value.
- * Our check_configured callback also checks for this and prevents archiving
- * from proceeding if it is still empty.
+ * Our check_configured callback also checks for this and prevents
+ * archiving from proceeding if it is still empty.
*/
if (*newval == NULL || *newval[0] == '\0')
return true;
@@ -119,7 +119,7 @@ check_archive_directory(char **newval, void **extra, GucSource source)
}
/*
- * Do a basic sanity check that the specified archive directory exists. It
+ * Do a basic sanity check that the specified archive directory exists. It
* could be removed at some point in the future, so we still need to be
* prepared for it not to exist in the actual archiving logic.
*/
@@ -155,18 +155,19 @@ basic_archive_file(const char *file, const char *path)
MemoryContext oldcontext;
/*
- * We run basic_archive_file_internal() in our own memory context so that we
- * can easily reset it during error recovery (thus avoiding memory leaks).
+ * We run basic_archive_file_internal() in our own memory context so that
+ * we can easily reset it during error recovery (thus avoiding memory
+ * leaks).
*/
oldcontext = MemoryContextSwitchTo(basic_archive_context);
/*
- * Since the archiver operates at the bottom of the exception stack, ERRORs
- * turn into FATALs and cause the archiver process to restart. However,
- * using ereport(ERROR, ...) when there are problems is easy to code and
- * maintain. Therefore, we create our own exception handler to catch ERRORs
- * and return false instead of restarting the archiver whenever there is a
- * failure.
+ * Since the archiver operates at the bottom of the exception stack,
+ * ERRORs turn into FATALs and cause the archiver process to restart.
+ * However, using ereport(ERROR, ...) when there are problems is easy to
+ * code and maintain. Therefore, we create our own exception handler to
+ * catch ERRORs and return false instead of restarting the archiver
+ * whenever there is a failure.
*/
if (sigsetjmp(local_sigjmp_buf, 1) != 0)
{
@@ -228,14 +229,14 @@ basic_archive_file_internal(const char *file, const char *path)
snprintf(destination, MAXPGPATH, "%s/%s", archive_directory, file);
/*
- * First, check if the file has already been archived. If it already exists
- * and has the same contents as the file we're trying to archive, we can
- * return success (after ensuring the file is persisted to disk). This
- * scenario is possible if the server crashed after archiving the file but
- * before renaming its .ready file to .done.
+ * First, check if the file has already been archived. If it already
+ * exists and has the same contents as the file we're trying to archive,
+ * we can return success (after ensuring the file is persisted to disk).
+ * This scenario is possible if the server crashed after archiving the
+ * file but before renaming its .ready file to .done.
*
- * If the archive file already exists but has different contents, something
- * might be wrong, so we just fail.
+ * If the archive file already exists but has different contents,
+ * something might be wrong, so we just fail.
*/
if (stat(destination, &st) == 0)
{
@@ -274,8 +275,8 @@ basic_archive_file_internal(const char *file, const char *path)
archive_directory, "archtemp", file, MyProcPid, epoch);
/*
- * Copy the file to its temporary destination. Note that this will fail if
- * temp already exists.
+ * Copy the file to its temporary destination. Note that this will fail
+ * if temp already exists.
*/
copy_file(unconstify(char *, path), temp);
@@ -318,9 +319,9 @@ compare_files(const char *file1, const char *file2)
for (;;)
{
- int nbytes = 0;
- int buf1_len = 0;
- int buf2_len = 0;
+ int nbytes = 0;
+ int buf1_len = 0;
+ int buf2_len = 0;
while (buf1_len < CMP_BUF_SIZE)
{
diff --git a/contrib/btree_gist/btree_bool.c b/contrib/btree_gist/btree_bool.c
index 1be246ea5e1..8b2af129b52 100644
--- a/contrib/btree_gist/btree_bool.c
+++ b/contrib/btree_gist/btree_bool.c
@@ -53,8 +53,8 @@ gbt_boollt(const void *a, const void *b, FmgrInfo *flinfo)
static int
gbt_boolkey_cmp(const void *a, const void *b, FmgrInfo *flinfo)
{
- boolKEY *ia = (boolKEY *) (((const Nsrt *) a)->t);
- boolKEY *ib = (boolKEY *) (((const Nsrt *) b)->t);
+ boolKEY *ia = (boolKEY *) (((const Nsrt *) a)->t);
+ boolKEY *ib = (boolKEY *) (((const Nsrt *) b)->t);
if (ia->lower == ib->lower)
{
diff --git a/contrib/hstore_plpython/hstore_plpython.c b/contrib/hstore_plpython/hstore_plpython.c
index 889ece315df..0be65075916 100644
--- a/contrib/hstore_plpython/hstore_plpython.c
+++ b/contrib/hstore_plpython/hstore_plpython.c
@@ -99,7 +99,7 @@ hstore_to_plpython(PG_FUNCTION_ARGS)
PyObject *key;
key = PLyUnicode_FromStringAndSize(HSTORE_KEY(entries, base, i),
- HSTORE_KEYLEN(entries, i));
+ HSTORE_KEYLEN(entries, i));
if (HSTORE_VALISNULL(entries, i))
PyDict_SetItem(dict, key, Py_None);
else
@@ -107,7 +107,7 @@ hstore_to_plpython(PG_FUNCTION_ARGS)
PyObject *value;
value = PLyUnicode_FromStringAndSize(HSTORE_VAL(entries, base, i),
- HSTORE_VALLEN(entries, i));
+ HSTORE_VALLEN(entries, i));
PyDict_SetItem(dict, key, value);
Py_XDECREF(value);
}
diff --git a/contrib/pageinspect/brinfuncs.c b/contrib/pageinspect/brinfuncs.c
index c2a37277e08..879276e6dec 100644
--- a/contrib/pageinspect/brinfuncs.c
+++ b/contrib/pageinspect/brinfuncs.c
@@ -63,12 +63,12 @@ brin_page_type(PG_FUNCTION_ARGS)
/* verify the special space has the expected size */
if (PageGetSpecialSize(page) != MAXALIGN(sizeof(BrinSpecialSpace)))
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("input page is not a valid %s page", "BRIN"),
- errdetail("Expected special size %d, got %d.",
- (int) MAXALIGN(sizeof(BrinSpecialSpace)),
- (int) PageGetSpecialSize(page))));
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("input page is not a valid %s page", "BRIN"),
+ errdetail("Expected special size %d, got %d.",
+ (int) MAXALIGN(sizeof(BrinSpecialSpace)),
+ (int) PageGetSpecialSize(page))));
switch (BrinPageType(page))
{
@@ -103,12 +103,12 @@ verify_brin_page(bytea *raw_page, uint16 type, const char *strtype)
/* verify the special space has the expected size */
if (PageGetSpecialSize(page) != MAXALIGN(sizeof(BrinSpecialSpace)))
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("input page is not a valid %s page", "BRIN"),
- errdetail("Expected special size %d, got %d.",
- (int) MAXALIGN(sizeof(BrinSpecialSpace)),
- (int) PageGetSpecialSize(page))));
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("input page is not a valid %s page", "BRIN"),
+ errdetail("Expected special size %d, got %d.",
+ (int) MAXALIGN(sizeof(BrinSpecialSpace)),
+ (int) PageGetSpecialSize(page))));
/* verify the special space says this page is what we want */
if (BrinPageType(page) != type)
diff --git a/contrib/pageinspect/gistfuncs.c b/contrib/pageinspect/gistfuncs.c
index 9c29fbc7aa6..4943d6f75bd 100644
--- a/contrib/pageinspect/gistfuncs.c
+++ b/contrib/pageinspect/gistfuncs.c
@@ -60,21 +60,21 @@ gist_page_opaque_info(PG_FUNCTION_ARGS)
/* verify the special space has the expected size */
if (PageGetSpecialSize(page) != MAXALIGN(sizeof(GISTPageOpaqueData)))
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("input page is not a valid %s page", "GiST"),
- errdetail("Expected special size %d, got %d.",
- (int) MAXALIGN(sizeof(GISTPageOpaqueData)),
- (int) PageGetSpecialSize(page))));
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("input page is not a valid %s page", "GiST"),
+ errdetail("Expected special size %d, got %d.",
+ (int) MAXALIGN(sizeof(GISTPageOpaqueData)),
+ (int) PageGetSpecialSize(page))));
opaq = GistPageGetOpaque(page);
if (opaq->gist_page_id != GIST_PAGE_ID)
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("input page is not a valid %s page", "GiST"),
- errdetail("Expected %08x, got %08x.",
- GIST_PAGE_ID,
- opaq->gist_page_id)));
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("input page is not a valid %s page", "GiST"),
+ errdetail("Expected %08x, got %08x.",
+ GIST_PAGE_ID,
+ opaq->gist_page_id)));
/* Build a tuple descriptor for our result type */
if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
@@ -138,21 +138,21 @@ gist_page_items_bytea(PG_FUNCTION_ARGS)
/* verify the special space has the expected size */
if (PageGetSpecialSize(page) != MAXALIGN(sizeof(GISTPageOpaqueData)))
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("input page is not a valid %s page", "GiST"),
- errdetail("Expected special size %d, got %d.",
- (int) MAXALIGN(sizeof(GISTPageOpaqueData)),
- (int) PageGetSpecialSize(page))));
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("input page is not a valid %s page", "GiST"),
+ errdetail("Expected special size %d, got %d.",
+ (int) MAXALIGN(sizeof(GISTPageOpaqueData)),
+ (int) PageGetSpecialSize(page))));
opaq = GistPageGetOpaque(page);
if (opaq->gist_page_id != GIST_PAGE_ID)
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("input page is not a valid %s page", "GiST"),
- errdetail("Expected %08x, got %08x.",
- GIST_PAGE_ID,
- opaq->gist_page_id)));
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("input page is not a valid %s page", "GiST"),
+ errdetail("Expected %08x, got %08x.",
+ GIST_PAGE_ID,
+ opaq->gist_page_id)));
/* Avoid bogus PageGetMaxOffsetNumber() call with deleted pages */
if (GistPageIsDeleted(page))
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index ceaad81a433..5c8b9ff9430 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -1533,7 +1533,10 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo,
HASH_SEQ_STATUS hash_seq;
pgssEntry *entry;
- /* Superusers or roles with the privileges of pg_read_all_stats members are allowed */
+ /*
+ * Superusers or roles with the privileges of pg_read_all_stats members
+ * are allowed
+ */
is_allowed_role = has_privs_of_role(userid, ROLE_PG_READ_ALL_STATS);
/* hash table must exist already */
diff --git a/contrib/pg_walinspect/pg_walinspect.c b/contrib/pg_walinspect/pg_walinspect.c
index cc33fb65d5c..a082dfb3310 100644
--- a/contrib/pg_walinspect/pg_walinspect.c
+++ b/contrib/pg_walinspect/pg_walinspect.c
@@ -47,7 +47,7 @@ static XLogRecPtr ValidateInputLSNs(bool till_end_of_wal,
XLogRecPtr start_lsn, XLogRecPtr end_lsn);
static void GetWALRecordsInfo(FunctionCallInfo fcinfo, XLogRecPtr start_lsn,
XLogRecPtr end_lsn);
-static void GetXLogSummaryStats(XLogStats * stats, ReturnSetInfo *rsinfo,
+static void GetXLogSummaryStats(XLogStats *stats, ReturnSetInfo *rsinfo,
Datum *values, bool *nulls, uint32 ncols,
bool stats_per_record);
static void FillXLogStatsRow(const char *name, uint64 n, uint64 total_count,
@@ -102,7 +102,7 @@ InitXLogReaderState(XLogRecPtr lsn, XLogRecPtr *first_record)
LSN_FORMAT_ARGS(lsn))));
private_data = (ReadLocalXLogPageNoWaitPrivate *)
- palloc0(sizeof(ReadLocalXLogPageNoWaitPrivate));
+ palloc0(sizeof(ReadLocalXLogPageNoWaitPrivate));
xlogreader = XLogReaderAllocate(wal_segment_size, NULL,
XL_ROUTINE(.page_read = &read_local_xlog_page_no_wait,
@@ -143,7 +143,7 @@ static XLogRecord *
ReadNextXLogRecord(XLogReaderState *xlogreader, XLogRecPtr first_record)
{
XLogRecord *record;
- char *errormsg;
+ char *errormsg;
record = XLogReadRecord(xlogreader, &errormsg);
@@ -153,7 +153,7 @@ ReadNextXLogRecord(XLogReaderState *xlogreader, XLogRecPtr first_record)
/* return NULL, if end of WAL is reached */
private_data = (ReadLocalXLogPageNoWaitPrivate *)
- xlogreader->private_data;
+ xlogreader->private_data;
if (private_data->end_of_wal)
return NULL;
@@ -181,12 +181,12 @@ GetWALRecordInfo(XLogReaderState *record, XLogRecPtr lsn,
Datum *values, bool *nulls, uint32 ncols)
{
const char *id;
- RmgrData desc;
- uint32 fpi_len = 0;
+ RmgrData desc;
+ uint32 fpi_len = 0;
StringInfoData rec_desc;
StringInfoData rec_blk_ref;
- uint32 main_data_len;
- int i = 0;
+ uint32 main_data_len;
+ int i = 0;
desc = GetRmgr(XLogRecGetRmid(record));
id = desc.rm_identify(XLogRecGetInfo(record));
@@ -228,9 +228,9 @@ Datum
pg_get_wal_record_info(PG_FUNCTION_ARGS)
{
#define PG_GET_WAL_RECORD_INFO_COLS 11
- Datum result;
- Datum values[PG_GET_WAL_RECORD_INFO_COLS];
- bool nulls[PG_GET_WAL_RECORD_INFO_COLS];
+ Datum result;
+ Datum values[PG_GET_WAL_RECORD_INFO_COLS];
+ bool nulls[PG_GET_WAL_RECORD_INFO_COLS];
XLogRecPtr lsn;
XLogRecPtr curr_lsn;
XLogRecPtr first_record;
@@ -334,8 +334,8 @@ GetWALRecordsInfo(FunctionCallInfo fcinfo, XLogRecPtr start_lsn,
XLogRecPtr first_record;
XLogReaderState *xlogreader;
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- Datum values[PG_GET_WAL_RECORDS_INFO_COLS];
- bool nulls[PG_GET_WAL_RECORDS_INFO_COLS];
+ Datum values[PG_GET_WAL_RECORDS_INFO_COLS];
+ bool nulls[PG_GET_WAL_RECORDS_INFO_COLS];
SetSingleFuncCall(fcinfo, 0);
@@ -418,11 +418,11 @@ FillXLogStatsRow(const char *name,
uint64 tot_len, uint64 total_len,
Datum *values, bool *nulls, uint32 ncols)
{
- double n_pct,
- rec_len_pct,
- fpi_len_pct,
- tot_len_pct;
- int i = 0;
+ double n_pct,
+ rec_len_pct,
+ fpi_len_pct,
+ tot_len_pct;
+ int i = 0;
n_pct = 0;
if (total_count != 0)
@@ -461,11 +461,11 @@ GetXLogSummaryStats(XLogStats *stats, ReturnSetInfo *rsinfo,
Datum *values, bool *nulls, uint32 ncols,
bool stats_per_record)
{
- uint64 total_count = 0;
- uint64 total_rec_len = 0;
- uint64 total_fpi_len = 0;
- uint64 total_len = 0;
- int ri;
+ uint64 total_count = 0;
+ uint64 total_rec_len = 0;
+ uint64 total_fpi_len = 0;
+ uint64 total_len = 0;
+ int ri;
/*
* Each row shows its percentages of the total, so make a first pass to
@@ -488,7 +488,7 @@ GetXLogSummaryStats(XLogStats *stats, ReturnSetInfo *rsinfo,
uint64 rec_len;
uint64 fpi_len;
uint64 tot_len;
- RmgrData desc;
+ RmgrData desc;
if (!RmgrIdIsValid(ri))
continue;
@@ -500,7 +500,7 @@ GetXLogSummaryStats(XLogStats *stats, ReturnSetInfo *rsinfo,
if (stats_per_record)
{
- int rj;
+ int rj;
for (rj = 0; rj < MAX_XLINFO_TYPES; rj++)
{
@@ -556,10 +556,10 @@ GetWalStats(FunctionCallInfo fcinfo, XLogRecPtr start_lsn,
#define PG_GET_WAL_STATS_COLS 9
XLogRecPtr first_record;
XLogReaderState *xlogreader;
- XLogStats stats;
+ XLogStats stats;
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- Datum values[PG_GET_WAL_STATS_COLS];
- bool nulls[PG_GET_WAL_STATS_COLS];
+ Datum values[PG_GET_WAL_STATS_COLS];
+ bool nulls[PG_GET_WAL_STATS_COLS];
SetSingleFuncCall(fcinfo, 0);
@@ -599,7 +599,7 @@ pg_get_wal_stats(PG_FUNCTION_ARGS)
{
XLogRecPtr start_lsn;
XLogRecPtr end_lsn;
- bool stats_per_record;
+ bool stats_per_record;
start_lsn = PG_GETARG_LSN(0);
end_lsn = PG_GETARG_LSN(1);
@@ -623,7 +623,7 @@ pg_get_wal_stats_till_end_of_wal(PG_FUNCTION_ARGS)
{
XLogRecPtr start_lsn;
XLogRecPtr end_lsn = InvalidXLogRecPtr;
- bool stats_per_record;
+ bool stats_per_record;
start_lsn = PG_GETARG_LSN(0);
stats_per_record = PG_GETARG_BOOL(1);
diff --git a/contrib/pgcrypto/openssl.c b/contrib/pgcrypto/openssl.c
index 53e64297c28..cf315517e0c 100644
--- a/contrib/pgcrypto/openssl.c
+++ b/contrib/pgcrypto/openssl.c
@@ -373,7 +373,8 @@ gen_ossl_decrypt(PX_Cipher *c, int padding, const uint8 *data, unsigned dlen,
uint8 *res, unsigned *rlen)
{
OSSLCipher *od = c->ptr;
- int outlen, outlen2;
+ int outlen,
+ outlen2;
if (!od->init)
{
@@ -402,7 +403,8 @@ gen_ossl_encrypt(PX_Cipher *c, int padding, const uint8 *data, unsigned dlen,
uint8 *res, unsigned *rlen)
{
OSSLCipher *od = c->ptr;
- int outlen, outlen2;
+ int outlen,
+ outlen2;
if (!od->init)
{
diff --git a/contrib/pgstattuple/pgstattuple.c b/contrib/pgstattuple/pgstattuple.c
index 30945669081..93b7834b774 100644
--- a/contrib/pgstattuple/pgstattuple.c
+++ b/contrib/pgstattuple/pgstattuple.c
@@ -255,46 +255,46 @@ pgstat_relation(Relation rel, FunctionCallInfo fcinfo)
if (RELKIND_HAS_TABLE_AM(rel->rd_rel->relkind) ||
rel->rd_rel->relkind == RELKIND_SEQUENCE)
{
- return pgstat_heap(rel, fcinfo);
+ return pgstat_heap(rel, fcinfo);
}
else if (rel->rd_rel->relkind == RELKIND_INDEX)
{
- switch (rel->rd_rel->relam)
- {
- case BTREE_AM_OID:
- return pgstat_index(rel, BTREE_METAPAGE + 1,
- pgstat_btree_page, fcinfo);
- case HASH_AM_OID:
- return pgstat_index(rel, HASH_METAPAGE + 1,
- pgstat_hash_page, fcinfo);
- case GIST_AM_OID:
- return pgstat_index(rel, GIST_ROOT_BLKNO + 1,
- pgstat_gist_page, fcinfo);
- case GIN_AM_OID:
- err = "gin index";
- break;
- case SPGIST_AM_OID:
- err = "spgist index";
- break;
- case BRIN_AM_OID:
- err = "brin index";
- break;
- default:
- err = "unknown index";
- break;
- }
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("index \"%s\" (%s) is not supported",
- RelationGetRelationName(rel), err)));
+ switch (rel->rd_rel->relam)
+ {
+ case BTREE_AM_OID:
+ return pgstat_index(rel, BTREE_METAPAGE + 1,
+ pgstat_btree_page, fcinfo);
+ case HASH_AM_OID:
+ return pgstat_index(rel, HASH_METAPAGE + 1,
+ pgstat_hash_page, fcinfo);
+ case GIST_AM_OID:
+ return pgstat_index(rel, GIST_ROOT_BLKNO + 1,
+ pgstat_gist_page, fcinfo);
+ case GIN_AM_OID:
+ err = "gin index";
+ break;
+ case SPGIST_AM_OID:
+ err = "spgist index";
+ break;
+ case BRIN_AM_OID:
+ err = "brin index";
+ break;
+ default:
+ err = "unknown index";
+ break;
+ }
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("index \"%s\" (%s) is not supported",
+ RelationGetRelationName(rel), err)));
}
else
{
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot get tuple-level statistics for relation \"%s\"",
- RelationGetRelationName(rel)),
- errdetail_relkind_not_supported(rel->rd_rel->relkind)));
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot get tuple-level statistics for relation \"%s\"",
+ RelationGetRelationName(rel)),
+ errdetail_relkind_not_supported(rel->rd_rel->relkind)));
}
return 0; /* should not happen */
diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c
index 541526ab80b..061ffaf329e 100644
--- a/contrib/postgres_fdw/connection.c
+++ b/contrib/postgres_fdw/connection.c
@@ -654,10 +654,10 @@ do_sql_command_end(PGconn *conn, const char *sql, bool consume_input)
PGresult *res;
/*
- * If requested, consume whatever data is available from the socket.
- * (Note that if all data is available, this allows pgfdw_get_result to
- * call PQgetResult without forcing the overhead of WaitLatchOrSocket,
- * which would be large compared to the overhead of PQconsumeInput.)
+ * If requested, consume whatever data is available from the socket. (Note
+ * that if all data is available, this allows pgfdw_get_result to call
+ * PQgetResult without forcing the overhead of WaitLatchOrSocket, which
+ * would be large compared to the overhead of PQconsumeInput.)
*/
if (consume_input && !PQconsumeInput(conn))
pgfdw_report_error(ERROR, NULL, conn, false, sql);
@@ -1560,6 +1560,7 @@ pgfdw_finish_pre_commit_cleanup(List *pending_entries)
entry = (ConnCacheEntry *) lfirst(lc);
Assert(entry->changing_xact_state);
+
/*
* We might already have received the result on the socket, so pass
* consume_input=true to try to consume it first
@@ -1634,6 +1635,7 @@ pgfdw_finish_pre_subcommit_cleanup(List *pending_entries, int curlevel)
entry = (ConnCacheEntry *) lfirst(lc);
Assert(entry->changing_xact_state);
+
/*
* We might already have received the result on the socket, so pass
* consume_input=true to try to consume it first
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index 0e5771c89d8..d56951153bb 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -1243,9 +1243,9 @@ postgresGetForeignPlan(PlannerInfo *root,
if (best_path->fdw_private)
{
has_final_sort = boolVal(list_nth(best_path->fdw_private,
- FdwPathPrivateHasFinalSort));
+ FdwPathPrivateHasFinalSort));
has_limit = boolVal(list_nth(best_path->fdw_private,
- FdwPathPrivateHasLimit));
+ FdwPathPrivateHasLimit));
}
if (IS_SIMPLE_REL(foreignrel))
@@ -1926,7 +1926,7 @@ postgresBeginForeignModify(ModifyTableState *mtstate,
values_end_len = intVal(list_nth(fdw_private,
FdwModifyPrivateLen));
has_returning = boolVal(list_nth(fdw_private,
- FdwModifyPrivateHasReturning));
+ FdwModifyPrivateHasReturning));
retrieved_attrs = (List *) list_nth(fdw_private,
FdwModifyPrivateRetrievedAttrs);
@@ -2686,11 +2686,11 @@ postgresBeginDirectModify(ForeignScanState *node, int eflags)
dmstate->query = strVal(list_nth(fsplan->fdw_private,
FdwDirectModifyPrivateUpdateSql));
dmstate->has_returning = boolVal(list_nth(fsplan->fdw_private,
- FdwDirectModifyPrivateHasReturning));
+ FdwDirectModifyPrivateHasReturning));
dmstate->retrieved_attrs = (List *) list_nth(fsplan->fdw_private,
FdwDirectModifyPrivateRetrievedAttrs);
dmstate->set_processed = boolVal(list_nth(fsplan->fdw_private,
- FdwDirectModifyPrivateSetProcessed));
+ FdwDirectModifyPrivateSetProcessed));
/* Create context for per-tuple temp workspace. */
dmstate->temp_cxt = AllocSetContextCreate(estate->es_query_cxt,
diff --git a/contrib/test_decoding/test_decoding.c b/contrib/test_decoding/test_decoding.c
index 08d366a594e..3736da6784b 100644
--- a/contrib/test_decoding/test_decoding.c
+++ b/contrib/test_decoding/test_decoding.c
@@ -300,8 +300,8 @@ pg_decode_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
txn->output_plugin_private = txndata;
/*
- * If asked to skip empty transactions, we'll emit BEGIN at the point where
- * the first operation is received for this transaction.
+ * If asked to skip empty transactions, we'll emit BEGIN at the point
+ * where the first operation is received for this transaction.
*/
if (data->skip_empty_xacts)
return;
@@ -360,8 +360,8 @@ pg_decode_begin_prepare_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
txn->output_plugin_private = txndata;
/*
- * If asked to skip empty transactions, we'll emit BEGIN at the point where
- * the first operation is received for this transaction.
+ * If asked to skip empty transactions, we'll emit BEGIN at the point
+ * where the first operation is received for this transaction.
*/
if (data->skip_empty_xacts)
return;
diff --git a/src/backend/access/common/toast_internals.c b/src/backend/access/common/toast_internals.c
index 7052ac99780..576e585a89f 100644
--- a/src/backend/access/common/toast_internals.c
+++ b/src/backend/access/common/toast_internals.c
@@ -663,9 +663,9 @@ init_toast_snapshot(Snapshot toast_snapshot)
/*
* Catalog snapshots can be returned by GetOldestSnapshot() even if not
* registered or active. That easily hides bugs around not having a
- * snapshot set up - most of the time there is a valid catalog
- * snapshot. So additionally insist that the current snapshot is
- * registered or active.
+ * snapshot set up - most of the time there is a valid catalog snapshot.
+ * So additionally insist that the current snapshot is registered or
+ * active.
*/
Assert(HaveRegisteredOrActiveSnapshot());
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 98d31de0031..9f43bbe25f5 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -68,9 +68,9 @@ typedef struct
/*
* Tuple visibility is only computed once for each tuple, for correctness
- * and efficiency reasons; see comment in heap_page_prune() for
- * details. This is of type int8[,] instead of HTSV_Result[], so we can use
- * -1 to indicate no visibility has been computed, e.g. for LP_DEAD items.
+ * and efficiency reasons; see comment in heap_page_prune() for details.
+ * This is of type int8[], instead of HTSV_Result[], so we can use -1 to
+ * indicate no visibility has been computed, e.g. for LP_DEAD items.
*
* Same indexing as ->marked.
*/
@@ -203,8 +203,8 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
*/
if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
{
- int ndeleted,
- nnewlpdead;
+ int ndeleted,
+ nnewlpdead;
ndeleted = heap_page_prune(relation, buffer, vistest, limited_xmin,
limited_ts, &nnewlpdead, NULL);
@@ -267,7 +267,7 @@ heap_page_prune(Relation relation, Buffer buffer,
GlobalVisState *vistest,
TransactionId old_snap_xmin,
TimestampTz old_snap_ts,
- int *nnewlpdead,
+ int *nnewlpdead,
OffsetNumber *off_loc)
{
int ndeleted = 0;
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 9482f99e68b..b802ed247e7 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -326,7 +326,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
PGRUsage ru0;
TimestampTz starttime = 0;
PgStat_Counter startreadtime = 0,
- startwritetime = 0;
+ startwritetime = 0;
WalUsage startwalusage = pgWalUsage;
int64 StartPageHit = VacuumPageHit,
StartPageMiss = VacuumPageMiss,
@@ -2232,12 +2232,12 @@ lazy_vacuum(LVRelState *vacrel)
* dead_items space is not CPU cache resident.
*
* We don't take any special steps to remember the LP_DEAD items (such
- * as counting them in our final update to the stats system) when
- * the optimization is applied. Though the accounting used in
- * analyze.c's acquire_sample_rows() will recognize the same LP_DEAD
- * items as dead rows in its own stats report, that's okay.
- * The discrepancy should be negligible. If this optimization is ever
- * expanded to cover more cases then this may need to be reconsidered.
+ * as counting them in our final update to the stats system) when the
+ * optimization is applied. Though the accounting used in analyze.c's
+ * acquire_sample_rows() will recognize the same LP_DEAD items as dead
+ * rows in its own stats report, that's okay. The discrepancy should
+ * be negligible. If this optimization is ever expanded to cover more
+ * cases then this may need to be reconsidered.
*/
threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
bypass = (vacrel->lpdead_item_pages < threshold &&
diff --git a/src/backend/access/rmgrdesc/xactdesc.c b/src/backend/access/rmgrdesc/xactdesc.c
index e739c4a3bd9..90b6ac2884d 100644
--- a/src/backend/access/rmgrdesc/xactdesc.c
+++ b/src/backend/access/rmgrdesc/xactdesc.c
@@ -411,8 +411,8 @@ xact_desc_prepare(StringInfo buf, uint8 info, xl_xact_prepare *xlrec, RepOriginI
parsed.tsId, xlrec->initfileinval);
/*
- * Check if the replication origin has been set in this record in the
- * same way as PrepareRedoAdd().
+ * Check if the replication origin has been set in this record in the same
+ * way as PrepareRedoAdd().
*/
if (origin_id != InvalidRepOriginId)
appendStringInfo(buf, "; origin: node %u, lsn %X/%X, at %s",
diff --git a/src/backend/access/rmgrdesc/xlogdesc.c b/src/backend/access/rmgrdesc/xlogdesc.c
index c0dfea40c70..fefc563323d 100644
--- a/src/backend/access/rmgrdesc/xlogdesc.c
+++ b/src/backend/access/rmgrdesc/xlogdesc.c
@@ -210,7 +210,7 @@ XLogRecGetBlockRefInfo(XLogReaderState *record, bool pretty,
bool detailed_format, StringInfo buf,
uint32 *fpi_len)
{
- int block_id;
+ int block_id;
Assert(record != NULL);
diff --git a/src/backend/access/transam/rmgr.c b/src/backend/access/transam/rmgr.c
index e1d6ebbd3db..8ed69244e39 100644
--- a/src/backend/access/transam/rmgr.c
+++ b/src/backend/access/transam/rmgr.c
@@ -38,7 +38,7 @@
#define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup,mask,decode) \
{ name, redo, desc, identify, startup, cleanup, mask, decode },
-RmgrData RmgrTable[RM_MAX_ID + 1] = {
+RmgrData RmgrTable[RM_MAX_ID + 1] = {
#include "access/rmgrlist.h"
};
@@ -125,8 +125,8 @@ RegisterCustomRmgr(RmgrId rmid, RmgrData *rmgr)
if (!pg_strcasecmp(RmgrTable[existing_rmid].rm_name, rmgr->rm_name))
ereport(ERROR,
- (errmsg("failed to register custom resource manager \"%s\" with ID %d", rmgr->rm_name, rmid),
- errdetail("Existing resource manager with ID %d has the same name.", existing_rmid)));
+ (errmsg("failed to register custom resource manager \"%s\" with ID %d", rmgr->rm_name, rmid),
+ errdetail("Existing resource manager with ID %d has the same name.", existing_rmid)));
}
/* register it */
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index dc0266693e3..75551f60cbc 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -1119,7 +1119,7 @@ StartPrepare(GlobalTransaction gxact)
if (hdr.nabortstats > 0)
{
save_state_data(abortstats,
- hdr.nabortstats * sizeof(xl_xact_stats_item));
+ hdr.nabortstats * sizeof(xl_xact_stats_item));
pfree(abortstats);
}
if (hdr.ninvalmsgs > 0)
@@ -1529,9 +1529,9 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
bufptr += MAXALIGN(hdr->ncommitrels * sizeof(RelFileNode));
abortrels = (RelFileNode *) bufptr;
bufptr += MAXALIGN(hdr->nabortrels * sizeof(RelFileNode));
- commitstats = (xl_xact_stats_item*) bufptr;
+ commitstats = (xl_xact_stats_item *) bufptr;
bufptr += MAXALIGN(hdr->ncommitstats * sizeof(xl_xact_stats_item));
- abortstats = (xl_xact_stats_item*) bufptr;
+ abortstats = (xl_xact_stats_item *) bufptr;
bufptr += MAXALIGN(hdr->nabortstats * sizeof(xl_xact_stats_item));
invalmsgs = (SharedInvalidationMessage *) bufptr;
bufptr += MAXALIGN(hdr->ninvalmsgs * sizeof(SharedInvalidationMessage));
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 36852f23277..71136b11a2a 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -435,10 +435,10 @@ typedef struct XLogCtlInsert
bool fullPageWrites;
/*
- * runningBackups is a counter indicating the number of backups currently in
- * progress. forcePageWrites is set to true when runningBackups is non-zero.
- * lastBackupStart is the latest checkpoint redo location used as a starting
- * point for an online backup.
+ * runningBackups is a counter indicating the number of backups currently
+ * in progress. forcePageWrites is set to true when runningBackups is
+ * non-zero. lastBackupStart is the latest checkpoint redo location used
+ * as a starting point for an online backup.
*/
int runningBackups;
XLogRecPtr lastBackupStart;
@@ -5307,14 +5307,14 @@ StartupXLOG(void)
* When recovering from a backup (we are in recovery, and archive recovery
* was requested), complain if we did not roll forward far enough to reach
* the point where the database is consistent. For regular online
- * backup-from-primary, that means reaching the end-of-backup WAL record (at
- * which point we reset backupStartPoint to be Invalid), for
+ * backup-from-primary, that means reaching the end-of-backup WAL record
+ * (at which point we reset backupStartPoint to be Invalid), for
* backup-from-replica (which can't inject records into the WAL stream),
* that point is when we reach the minRecoveryPoint in pg_control (which
- * we purposfully copy last when backing up from a replica). For pg_rewind
- * (which creates a backup_label with a method of "pg_rewind") or
- * snapshot-style backups (which don't), backupEndRequired will be set to
- * false.
+ * we purposefully copy last when backing up from a replica). For
+ * pg_rewind (which creates a backup_label with a method of "pg_rewind")
+ * or snapshot-style backups (which don't), backupEndRequired will be set
+ * to false.
*
* Note: it is indeed okay to look at the local variable
* LocalMinRecoveryPoint here, even though ControlFile->minRecoveryPoint
@@ -5328,8 +5328,8 @@ StartupXLOG(void)
/*
* Ran off end of WAL before reaching end-of-backup WAL record, or
* minRecoveryPoint. That's a bad sign, indicating that you tried to
- * recover from an online backup but never called pg_backup_stop(),
- * or you didn't archive all the WAL needed.
+ * recover from an online backup but never called pg_backup_stop(), or
+ * you didn't archive all the WAL needed.
*/
if (ArchiveRecoveryRequested || ControlFile->backupEndRequired)
{
@@ -8481,8 +8481,8 @@ do_pg_backup_stop(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
WALInsertLockAcquireExclusive();
/*
- * It is expected that each do_pg_backup_start() call is matched by exactly
- * one do_pg_backup_stop() call.
+ * It is expected that each do_pg_backup_start() call is matched by
+ * exactly one do_pg_backup_stop() call.
*/
Assert(XLogCtl->Insert.runningBackups > 0);
XLogCtl->Insert.runningBackups--;
diff --git a/src/backend/access/transam/xlogarchive.c b/src/backend/access/transam/xlogarchive.c
index a2657a20058..4101a30e374 100644
--- a/src/backend/access/transam/xlogarchive.c
+++ b/src/backend/access/transam/xlogarchive.c
@@ -497,15 +497,15 @@ XLogArchiveNotify(const char *xlog)
}
/*
- * Timeline history files are given the highest archival priority to
- * lower the chance that a promoted standby will choose a timeline that
- * is already in use. However, the archiver ordinarily tries to gather
+ * Timeline history files are given the highest archival priority to lower
+ * the chance that a promoted standby will choose a timeline that is
+ * already in use. However, the archiver ordinarily tries to gather
* multiple files to archive from each scan of the archive_status
- * directory, which means that newly created timeline history files
- * could be left unarchived for a while. To ensure that the archiver
- * picks up timeline history files as soon as possible, we force the
- * archiver to scan the archive_status directory the next time it looks
- * for a file to archive.
+ * directory, which means that newly created timeline history files could
+ * be left unarchived for a while. To ensure that the archiver picks up
+ * timeline history files as soon as possible, we force the archiver to
+ * scan the archive_status directory the next time it looks for a file to
+ * archive.
*/
if (IsTLHistoryFileName(xlog))
PgArchForceDirScan();
diff --git a/src/backend/access/transam/xlogfuncs.c b/src/backend/access/transam/xlogfuncs.c
index b61ae6c0b4a..02bd919ff64 100644
--- a/src/backend/access/transam/xlogfuncs.c
+++ b/src/backend/access/transam/xlogfuncs.c
@@ -74,8 +74,8 @@ pg_backup_start(PG_FUNCTION_ARGS)
errmsg("a backup is already in progress in this session")));
/*
- * Label file and tablespace map file need to be long-lived, since
- * they are read in pg_backup_stop.
+ * Label file and tablespace map file need to be long-lived, since they
+ * are read in pg_backup_stop.
*/
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
label_file = makeStringInfo();
@@ -127,8 +127,8 @@ pg_backup_stop(PG_FUNCTION_ARGS)
errhint("Did you call pg_backup_start()?")));
/*
- * Stop the backup. Return a copy of the backup label and tablespace map so
- * they can be written to disk by the caller.
+ * Stop the backup. Return a copy of the backup label and tablespace map
+ * so they can be written to disk by the caller.
*/
stoppoint = do_pg_backup_stop(label_file->data, waitforarchive, NULL);
diff --git a/src/backend/access/transam/xlogrecovery.c b/src/backend/access/transam/xlogrecovery.c
index 39ef865ed92..6eba6264202 100644
--- a/src/backend/access/transam/xlogrecovery.c
+++ b/src/backend/access/transam/xlogrecovery.c
@@ -1205,9 +1205,9 @@ read_backup_label(XLogRecPtr *checkPointLoc, TimeLineID *backupLabelTLI,
* method was used) or if this label came from somewhere else (the only
* other option today being from pg_rewind). If this was a streamed
* backup then we know that we need to play through until we get to the
- * end of the WAL which was generated during the backup (at which point
- * we will have reached consistency and backupEndRequired will be reset
- * to be false).
+ * end of the WAL which was generated during the backup (at which point we
+ * will have reached consistency and backupEndRequired will be reset to be
+ * false).
*/
if (fscanf(lfp, "BACKUP METHOD: %19s\n", backuptype) == 1)
{
@@ -2055,10 +2055,9 @@ CheckRecoveryConsistency(void)
/*
* Have we passed our safe starting point? Note that minRecoveryPoint is
- * known to be incorrectly set if recovering from a backup, until
- * the XLOG_BACKUP_END arrives to advise us of the correct
- * minRecoveryPoint. All we know prior to that is that we're not
- * consistent yet.
+ * known to be incorrectly set if recovering from a backup, until the
+ * XLOG_BACKUP_END arrives to advise us of the correct minRecoveryPoint.
+ * All we know prior to that is that we're not consistent yet.
*/
if (!reachedConsistency && !backupEndRequired &&
minRecoveryPoint <= lastReplayedEndRecPtr)
@@ -3802,7 +3801,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
HandleStartupProcInterrupts();
}
- return XLREAD_FAIL; /* not reached */
+ return XLREAD_FAIL; /* not reached */
}
diff --git a/src/backend/access/transam/xlogstats.c b/src/backend/access/transam/xlogstats.c
index 6524a1ad0b9..514181792dc 100644
--- a/src/backend/access/transam/xlogstats.c
+++ b/src/backend/access/transam/xlogstats.c
@@ -22,7 +22,7 @@ void
XLogRecGetLen(XLogReaderState *record, uint32 *rec_len,
uint32 *fpi_len)
{
- int block_id;
+ int block_id;
/*
* Calculate the amount of FPI data in the record.
@@ -53,10 +53,10 @@ XLogRecGetLen(XLogReaderState *record, uint32 *rec_len,
void
XLogRecStoreStats(XLogStats *stats, XLogReaderState *record)
{
- RmgrId rmid;
- uint8 recid;
- uint32 rec_len;
- uint32 fpi_len;
+ RmgrId rmid;
+ uint8 recid;
+ uint32 rec_len;
+ uint32 fpi_len;
Assert(stats != NULL && record != NULL);
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index 29419c10a88..48516694f08 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -80,10 +80,9 @@ typedef struct xl_invalid_page
static HTAB *invalid_page_tab = NULL;
-static int
-read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr,
- int reqLen, XLogRecPtr targetRecPtr,
- char *cur_page, bool wait_for_wal);
+static int read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr,
+ int reqLen, XLogRecPtr targetRecPtr,
+ char *cur_page, bool wait_for_wal);
/* Report a reference to an invalid page */
static void
@@ -940,8 +939,8 @@ read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr,
* archive in the timeline will get renamed to .partial by
* StartupXLOG().
*
- * If that happens after our caller determined the TLI but before
- * we actually read the xlog page, we might still try to read from the
+ * If that happens after our caller determined the TLI but before we
+ * actually read the xlog page, we might still try to read from the
* old (now renamed) segment and fail. There's not much we can do
* about this, but it can only happen when we're a leaf of a cascading
* standby whose primary gets promoted while we're decoding, so a
@@ -965,7 +964,7 @@ read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr,
* end of WAL has been reached.
*/
private_data = (ReadLocalXLogPageNoWaitPrivate *)
- state->private_data;
+ state->private_data;
private_data->end_of_wal = true;
break;
}
diff --git a/src/backend/catalog/Catalog.pm b/src/backend/catalog/Catalog.pm
index ece0a934f05..e91a8e10a8d 100644
--- a/src/backend/catalog/Catalog.pm
+++ b/src/backend/catalog/Catalog.pm
@@ -41,12 +41,12 @@ sub ParseHeader
my $is_varlen = 0;
my $is_client_code = 0;
- $catalog{columns} = [];
- $catalog{toasting} = [];
- $catalog{indexing} = [];
- $catalog{other_oids} = [];
+ $catalog{columns} = [];
+ $catalog{toasting} = [];
+ $catalog{indexing} = [];
+ $catalog{other_oids} = [];
$catalog{foreign_keys} = [];
- $catalog{client_code} = [];
+ $catalog{client_code} = [];
open(my $ifh, '<', $input_file) || die "$input_file: $!";
@@ -96,7 +96,9 @@ sub ParseHeader
push @{ $catalog{toasting} },
{ parent_table => $1, toast_oid => $2, toast_index_oid => $3 };
}
- elsif (/^DECLARE_TOAST_WITH_MACRO\(\s*(\w+),\s*(\d+),\s*(\d+),\s*(\w+),\s*(\w+)\)/)
+ elsif (
+ /^DECLARE_TOAST_WITH_MACRO\(\s*(\w+),\s*(\d+),\s*(\d+),\s*(\w+),\s*(\w+)\)/
+ )
{
push @{ $catalog{toasting} },
{
@@ -108,16 +110,17 @@ sub ParseHeader
};
}
elsif (
- /^DECLARE_(UNIQUE_)?INDEX(_PKEY)?\(\s*(\w+),\s*(\d+),\s*(\w+),\s*(.+)\)/)
+ /^DECLARE_(UNIQUE_)?INDEX(_PKEY)?\(\s*(\w+),\s*(\d+),\s*(\w+),\s*(.+)\)/
+ )
{
push @{ $catalog{indexing} },
{
is_unique => $1 ? 1 : 0,
is_pkey => $2 ? 1 : 0,
- index_name => $3,
- index_oid => $4,
+ index_name => $3,
+ index_oid => $4,
index_oid_macro => $5,
- index_decl => $6
+ index_decl => $6
};
}
elsif (/^DECLARE_OID_DEFINING_MACRO\(\s*(\w+),\s*(\d+)\)/)
diff --git a/src/backend/catalog/genbki.pl b/src/backend/catalog/genbki.pl
index f4ec6d6d40c..17b2c5e3f3d 100644
--- a/src/backend/catalog/genbki.pl
+++ b/src/backend/catalog/genbki.pl
@@ -814,7 +814,7 @@ Catalog::RenameTempFile($schemafile, $tmpext);
Catalog::RenameTempFile($fk_info_file, $tmpext);
Catalog::RenameTempFile($constraints_file, $tmpext);
-exit ($num_errors != 0 ? 1 : 0);
+exit($num_errors != 0 ? 1 : 0);
#################### Subroutines ########################
@@ -916,11 +916,11 @@ sub morph_row_for_pgattr
# Copy the type data from pg_type, and add some type-dependent items
my $type = $types{$atttype};
- $row->{atttypid} = $type->{oid};
- $row->{attlen} = $type->{typlen};
- $row->{attbyval} = $type->{typbyval};
- $row->{attalign} = $type->{typalign};
- $row->{attstorage} = $type->{typstorage};
+ $row->{atttypid} = $type->{oid};
+ $row->{attlen} = $type->{typlen};
+ $row->{attbyval} = $type->{typbyval};
+ $row->{attalign} = $type->{typalign};
+ $row->{attstorage} = $type->{typstorage};
# set attndims if it's an array type
$row->{attndims} = $type->{typcategory} eq 'A' ? '1' : '0';
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index 9b512ccd3c0..800f85ed7db 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -1198,7 +1198,7 @@ heap_create_with_catalog(const char *relname,
if (!OidIsValid(binary_upgrade_next_toast_pg_class_relfilenode))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("toast relfilenode value not set when in binary upgrade mode")));
+ errmsg("toast relfilenode value not set when in binary upgrade mode")));
relfilenode = binary_upgrade_next_toast_pg_class_relfilenode;
binary_upgrade_next_toast_pg_class_relfilenode = InvalidOid;
@@ -1265,8 +1265,8 @@ heap_create_with_catalog(const char *relname,
* remove the disk file again.)
*
* NB: Note that passing create_storage = true is correct even for binary
- * upgrade. The storage we create here will be replaced later, but we need
- * to have something on disk in the meanwhile.
+ * upgrade. The storage we create here will be replaced later, but we
+ * need to have something on disk in the meanwhile.
*/
new_rel_desc = heap_create(relname,
relnamespace,
@@ -3219,9 +3219,8 @@ restart:
/*
* If this constraint has a parent constraint which we have not seen
* yet, keep track of it for the second loop, below. Tracking parent
- * constraints allows us to climb up to the top-level constraint
- * and look for all possible relations referencing the partitioned
- * table.
+ * constraints allows us to climb up to the top-level constraint and
+ * look for all possible relations referencing the partitioned table.
*/
if (OidIsValid(con->conparentid) &&
!list_member_oid(parent_cons, con->conparentid))
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 7539742c782..bdd3c348417 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -928,9 +928,9 @@ index_create(Relation heapRelation,
binary_upgrade_next_index_pg_class_relfilenode = InvalidOid;
/*
- * Note that we want create_storage = true for binary upgrade.
- * The storage we create here will be replaced later, but we need
- * to have something on disk in the meanwhile.
+ * Note that we want create_storage = true for binary upgrade. The
+ * storage we create here will be replaced later, but we need to
+ * have something on disk in the meanwhile.
*/
Assert(create_storage);
}
diff --git a/src/backend/catalog/objectaccess.c b/src/backend/catalog/objectaccess.c
index 38922294e28..1c51df02d21 100644
--- a/src/backend/catalog/objectaccess.c
+++ b/src/backend/catalog/objectaccess.c
@@ -156,7 +156,7 @@ RunFunctionExecuteHook(Oid objectId)
*/
void
RunObjectPostCreateHookStr(Oid classId, const char *objectName, int subId,
- bool is_internal)
+ bool is_internal)
{
ObjectAccessPostCreate pc_arg;
@@ -167,8 +167,8 @@ RunObjectPostCreateHookStr(Oid classId, const char *objectName, int subId,
pc_arg.is_internal = is_internal;
(*object_access_hook_str) (OAT_POST_CREATE,
- classId, objectName, subId,
- (void *) &pc_arg);
+ classId, objectName, subId,
+ (void *) &pc_arg);
}
/*
@@ -178,7 +178,7 @@ RunObjectPostCreateHookStr(Oid classId, const char *objectName, int subId,
*/
void
RunObjectDropHookStr(Oid classId, const char *objectName, int subId,
- int dropflags)
+ int dropflags)
{
ObjectAccessDrop drop_arg;
@@ -189,8 +189,8 @@ RunObjectDropHookStr(Oid classId, const char *objectName, int subId,
drop_arg.dropflags = dropflags;
(*object_access_hook_str) (OAT_DROP,
- classId, objectName, subId,
- (void *) &drop_arg);
+ classId, objectName, subId,
+ (void *) &drop_arg);
}
/*
@@ -205,8 +205,8 @@ RunObjectTruncateHookStr(const char *objectName)
Assert(object_access_hook_str != NULL);
(*object_access_hook_str) (OAT_TRUNCATE,
- RelationRelationId, objectName, 0,
- NULL);
+ RelationRelationId, objectName, 0,
+ NULL);
}
/*
@@ -216,7 +216,7 @@ RunObjectTruncateHookStr(const char *objectName)
*/
void
RunObjectPostAlterHookStr(Oid classId, const char *objectName, int subId,
- Oid auxiliaryId, bool is_internal)
+ Oid auxiliaryId, bool is_internal)
{
ObjectAccessPostAlter pa_arg;
@@ -228,8 +228,8 @@ RunObjectPostAlterHookStr(Oid classId, const char *objectName, int subId,
pa_arg.is_internal = is_internal;
(*object_access_hook_str) (OAT_POST_ALTER,
- classId, objectName, subId,
- (void *) &pa_arg);
+ classId, objectName, subId,
+ (void *) &pa_arg);
}
/*
@@ -250,8 +250,8 @@ RunNamespaceSearchHookStr(const char *objectName, bool ereport_on_violation)
ns_arg.result = true;
(*object_access_hook_str) (OAT_NAMESPACE_SEARCH,
- NamespaceRelationId, objectName, 0,
- (void *) &ns_arg);
+ NamespaceRelationId, objectName, 0,
+ (void *) &ns_arg);
return ns_arg.result;
}
@@ -268,6 +268,6 @@ RunFunctionExecuteHookStr(const char *objectName)
Assert(object_access_hook_str != NULL);
(*object_access_hook_str) (OAT_FUNCTION_EXECUTE,
- ProcedureRelationId, objectName, 0,
- NULL);
+ ProcedureRelationId, objectName, 0,
+ NULL);
}
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index 472dbda2116..489f0b2818e 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -145,7 +145,7 @@ CreateConstraintEntry(const char *constraintName,
for (i = 0; i < numFkDeleteSetCols; i++)
fkdatums[i] = Int16GetDatum(fkDeleteSetCols[i]);
confdelsetcolsArray = construct_array(fkdatums, numFkDeleteSetCols,
- INT2OID, 2, true, TYPALIGN_SHORT);
+ INT2OID, 2, true, TYPALIGN_SHORT);
}
else
confdelsetcolsArray = NULL;
@@ -1291,7 +1291,7 @@ DeconstructFkConstraintRow(HeapTuple tuple, int *numfks,
}
else
{
- int num_delete_cols;
+ int num_delete_cols;
arr = DatumGetArrayTypeP(adatum); /* ensure not toasted */
if (ARR_NDIM(arr) != 1 ||
@@ -1301,7 +1301,7 @@ DeconstructFkConstraintRow(HeapTuple tuple, int *numfks,
num_delete_cols = ARR_DIMS(arr)[0];
memcpy(fk_del_set_cols, ARR_DATA_PTR(arr), num_delete_cols * sizeof(int16));
if ((Pointer) arr != DatumGetPointer(adatum))
- pfree(arr); /* free de-toasted copy, if any */
+ pfree(arr); /* free de-toasted copy, if any */
*num_fk_del_set_cols = num_delete_cols;
}
diff --git a/src/backend/catalog/pg_publication.c b/src/backend/catalog/pg_publication.c
index 2631558ff11..e2c8bcb2797 100644
--- a/src/backend/catalog/pg_publication.c
+++ b/src/backend/catalog/pg_publication.c
@@ -378,9 +378,9 @@ publication_add_relation(Oid pubid, PublicationRelInfo *pri,
check_publication_add_relation(targetrel);
/*
- * Translate column names to attnums and make sure the column list contains
- * only allowed elements (no system or generated columns etc.). Also build
- * an array of attnums, for storing in the catalog.
+ * Translate column names to attnums and make sure the column list
+ * contains only allowed elements (no system or generated columns etc.).
+ * Also build an array of attnums, for storing in the catalog.
*/
publication_translate_columns(pri->relation, pri->columns,
&natts, &attarray);
@@ -555,11 +555,11 @@ pub_collist_to_bitmapset(Bitmapset *columns, Datum pubcols, MemoryContext mcxt)
ArrayType *arr;
int nelems;
int16 *elems;
- MemoryContext oldcxt = NULL;
+ MemoryContext oldcxt = NULL;
/*
- * If an existing bitmap was provided, use it. Otherwise just use NULL
- * and build a new bitmap.
+ * If an existing bitmap was provided, use it. Otherwise just use NULL and
+ * build a new bitmap.
*/
if (columns)
result = columns;
diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c
index e4d000d4fe8..cd31e68e95e 100644
--- a/src/backend/catalog/storage.c
+++ b/src/backend/catalog/storage.c
@@ -340,13 +340,13 @@ RelationTruncate(Relation rel, BlockNumber nblocks)
* is in progress.
*
* The truncation operation might drop buffers that the checkpoint
- * otherwise would have flushed. If it does, then it's essential that
- * the files actually get truncated on disk before the checkpoint record
- * is written. Otherwise, if reply begins from that checkpoint, the
+ * otherwise would have flushed. If it does, then it's essential that the
+ * files actually get truncated on disk before the checkpoint record is
+ * written. Otherwise, if replay begins from that checkpoint, the
* to-be-truncated blocks might still exist on disk but have older
- * contents than expected, which can cause replay to fail. It's OK for
- * the blocks to not exist on disk at all, but not for them to have the
- * wrong contents.
+ * contents than expected, which can cause replay to fail. It's OK for the
+ * blocks to not exist on disk at all, but not for them to have the wrong
+ * contents.
*/
Assert((MyProc->delayChkptFlags & DELAY_CHKPT_COMPLETE) == 0);
MyProc->delayChkptFlags |= DELAY_CHKPT_COMPLETE;
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 305226692a4..2da6b75a155 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -429,7 +429,7 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
*/
if (onerel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
{
- List *idxs = RelationGetIndexList(onerel);
+ List *idxs = RelationGetIndexList(onerel);
Irel = NULL;
nindexes = 0;
@@ -680,10 +680,10 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
}
/*
- * Now report ANALYZE to the cumulative stats system. For regular tables, we do
- * it only if not doing inherited stats. For partitioned tables, we only
- * do it for inherited stats. (We're never called for not-inherited stats
- * on partitioned tables anyway.)
+ * Now report ANALYZE to the cumulative stats system. For regular tables,
+ * we do it only if not doing inherited stats. For partitioned tables, we
+ * only do it for inherited stats. (We're never called for not-inherited
+ * stats on partitioned tables anyway.)
*
* Reset the changes_since_analyze counter only if we analyzed all
* columns; otherwise, there is still work for auto-analyze to do.
diff --git a/src/backend/commands/collationcmds.c b/src/backend/commands/collationcmds.c
index 346f85f05ea..fcfc02d2aed 100644
--- a/src/backend/commands/collationcmds.c
+++ b/src/backend/commands/collationcmds.c
@@ -246,8 +246,9 @@ DefineCollation(ParseState *pstate, List *names, List *parameters, bool if_not_e
/*
* Nondeterministic collations are currently only supported with ICU
- * because that's the only case where it can actually make a difference.
- * So we can save writing the code for the other providers.
+ * because that's the only case where it can actually make a
+ * difference. So we can save writing the code for the other
+ * providers.
*/
if (!collisdeterministic && collprovider != COLLPROVIDER_ICU)
ereport(ERROR,
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 689713ea580..f448d39c7ed 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -345,7 +345,7 @@ defGetCopyHeaderChoice(DefElem *def)
break;
default:
{
- char *sval = defGetString(def);
+ char *sval = defGetString(def);
/*
* The set of strings accepted here should match up with the
@@ -365,8 +365,8 @@ defGetCopyHeaderChoice(DefElem *def)
break;
}
ereport(ERROR,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("%s requires a Boolean value or \"match\"",
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("%s requires a Boolean value or \"match\"",
def->defname)));
return COPY_HEADER_FALSE; /* keep compiler quiet */
}
diff --git a/src/backend/commands/copyfromparse.c b/src/backend/commands/copyfromparse.c
index 58017ec53b0..edb80e2cd52 100644
--- a/src/backend/commands/copyfromparse.c
+++ b/src/backend/commands/copyfromparse.c
@@ -800,7 +800,8 @@ NextCopyFromRawFields(CopyFromState cstate, char ***fields, int *nfields)
errmsg("column name mismatch in header line field %d: got null value (\"%s\"), expected \"%s\"",
fldnum, cstate->opts.null_print, NameStr(attr->attname))));
- if (namestrcmp(&attr->attname, colName) != 0) {
+ if (namestrcmp(&attr->attname, colName) != 0)
+ {
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("column name mismatch in header line field %d: got \"%s\", expected \"%s\"",
diff --git a/src/backend/commands/copyto.c b/src/backend/commands/copyto.c
index 643bbf286e5..fca29a9a105 100644
--- a/src/backend/commands/copyto.c
+++ b/src/backend/commands/copyto.c
@@ -439,8 +439,8 @@ BeginCopyTo(ParseState *pstate,
* locks on the source table(s).
*/
rewritten = pg_analyze_and_rewrite_fixedparams(raw_query,
- pstate->p_sourcetext, NULL, 0,
- NULL);
+ pstate->p_sourcetext, NULL, 0,
+ NULL);
/* check that we got back something we can work with */
if (rewritten == NIL)
@@ -862,7 +862,7 @@ DoCopyTo(CopyToState cstate)
if (cstate->opts.csv_mode)
CopyAttributeOutCSV(cstate, colname, false,
- list_length(cstate->attnumlist) == 1);
+ list_length(cstate->attnumlist) == 1);
else
CopyAttributeOutText(cstate, colname);
}
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 6da58437c58..f2691684010 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -201,9 +201,9 @@ CreateDatabaseUsingWalLog(Oid src_dboid, Oid dst_dboid,
*
* We typically do not read relation data into shared_buffers without
* holding a relation lock. It's unclear what could go wrong if we
- * skipped it in this case, because nobody can be modifying either
- * the source or destination database at this point, and we have locks
- * on both databases, too, but let's take the conservative route.
+ * skipped it in this case, because nobody can be modifying either the
+ * source or destination database at this point, and we have locks on
+ * both databases, too, but let's take the conservative route.
*/
dstrelid.relId = srcrelid.relId = relinfo->reloid;
LockRelationId(&srcrelid, AccessShareLock);
@@ -274,9 +274,9 @@ ScanSourceDatabasePgClass(Oid tbid, Oid dbid, char *srcpath)
/*
* We can't use a real relcache entry for a relation in some other
- * database, but since we're only going to access the fields related
- * to physical storage, a fake one is good enough. If we didn't do this
- * and used the smgr layer directly, we would have to worry about
+ * database, but since we're only going to access the fields related to
+ * physical storage, a fake one is good enough. If we didn't do this and
+ * used the smgr layer directly, we would have to worry about
* invalidations.
*/
rel = CreateFakeRelcacheEntry(rnode);
@@ -333,10 +333,10 @@ ScanSourceDatabasePgClassPage(Page page, Buffer buf, Oid tbid, Oid dbid,
char *srcpath, List *rnodelist,
Snapshot snapshot)
{
- BlockNumber blkno = BufferGetBlockNumber(buf);
- OffsetNumber offnum;
- OffsetNumber maxoff;
- HeapTupleData tuple;
+ BlockNumber blkno = BufferGetBlockNumber(buf);
+ OffsetNumber offnum;
+ OffsetNumber maxoff;
+ HeapTupleData tuple;
maxoff = PageGetMaxOffsetNumber(page);
@@ -368,10 +368,10 @@ ScanSourceDatabasePgClassPage(Page page, Buffer buf, Oid tbid, Oid dbid,
CreateDBRelInfo *relinfo;
/*
- * ScanSourceDatabasePgClassTuple is in charge of constructing
- * a CreateDBRelInfo object for this tuple, but can also decide
- * that this tuple isn't something we need to copy. If we do need
- * to copy the relation, add it to the list.
+ * ScanSourceDatabasePgClassTuple is in charge of constructing a
+ * CreateDBRelInfo object for this tuple, but can also decide that
+ * this tuple isn't something we need to copy. If we do need to
+ * copy the relation, add it to the list.
*/
relinfo = ScanSourceDatabasePgClassTuple(&tuple, tbid, dbid,
srcpath);
@@ -395,9 +395,9 @@ CreateDBRelInfo *
ScanSourceDatabasePgClassTuple(HeapTupleData *tuple, Oid tbid, Oid dbid,
char *srcpath)
{
- CreateDBRelInfo *relinfo;
- Form_pg_class classForm;
- Oid relfilenode = InvalidOid;
+ CreateDBRelInfo *relinfo;
+ Form_pg_class classForm;
+ Oid relfilenode = InvalidOid;
classForm = (Form_pg_class) GETSTRUCT(tuple);
@@ -406,11 +406,11 @@ ScanSourceDatabasePgClassTuple(HeapTupleData *tuple, Oid tbid, Oid dbid,
*
* Shared objects don't need to be copied, because they are shared.
* Objects without storage can't be copied, because there's nothing to
- * copy. Temporary relations don't need to be copied either, because
- * they are inaccessible outside of the session that created them,
- * which must be gone already, and couldn't connect to a different database
- * if it still existed. autovacuum will eventually remove the pg_class
- * entries as well.
+ * copy. Temporary relations don't need to be copied either, because they
+ * are inaccessible outside of the session that created them, which must
+ * be gone already, and couldn't connect to a different database if it
+ * still existed. autovacuum will eventually remove the pg_class entries
+ * as well.
*/
if (classForm->reltablespace == GLOBALTABLESPACE_OID ||
!RELKIND_HAS_STORAGE(classForm->relkind) ||
@@ -702,7 +702,7 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
DefElem *dcollate = NULL;
DefElem *dctype = NULL;
DefElem *diculocale = NULL;
- DefElem *dlocprovider = NULL;
+ DefElem *dlocprovider = NULL;
DefElem *distemplate = NULL;
DefElem *dallowconnections = NULL;
DefElem *dconnlimit = NULL;
@@ -824,10 +824,10 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
/*
* We don't normally permit new databases to be created with
* system-assigned OIDs. pg_upgrade tries to preserve database
- * OIDs, so we can't allow any database to be created with an
- * OID that might be in use in a freshly-initialized cluster
- * created by some future version. We assume all such OIDs will
- * be from the system-managed OID range.
+ * OIDs, so we can't allow any database to be created with an OID
+ * that might be in use in a freshly-initialized cluster created
+ * by some future version. We assume all such OIDs will be from
+ * the system-managed OID range.
*
* As an exception, however, we permit any OID to be assigned when
* allow_system_table_mods=on (so that initdb can assign system
@@ -1348,15 +1348,15 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
InvokeObjectPostCreateHook(DatabaseRelationId, dboid, 0);
/*
- * If we're going to be reading data for the to-be-created database
- * into shared_buffers, take a lock on it. Nobody should know that this
+ * If we're going to be reading data for the to-be-created database into
+ * shared_buffers, take a lock on it. Nobody should know that this
* database exists yet, but it's good to maintain the invariant that an
* AccessExclusiveLock on the database is sufficient to drop all
* of its buffers without worrying about more being read later.
*
- * Note that we need to do this before entering the PG_ENSURE_ERROR_CLEANUP
- * block below, because createdb_failure_callback expects this lock to
- * be held already.
+ * Note that we need to do this before entering the
+ * PG_ENSURE_ERROR_CLEANUP block below, because createdb_failure_callback
+ * expects this lock to be held already.
*/
if (dbstrategy == CREATEDB_WAL_LOG)
LockSharedObject(DatabaseRelationId, dboid, 0, AccessShareLock);
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index d2a24798220..c461061fe9e 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -3833,7 +3833,7 @@ ExplainTargetRel(Plan *plan, Index rti, ExplainState *es)
if (rte->tablefunc)
if (rte->tablefunc->functype == TFT_XMLTABLE)
objectname = "xmltable";
- else /* Must be TFT_JSON_TABLE */
+ else /* Must be TFT_JSON_TABLE */
objectname = "json_table";
else
objectname = NULL;
diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c
index 1013790dbb3..767d9b96190 100644
--- a/src/backend/commands/extension.c
+++ b/src/backend/commands/extension.c
@@ -758,10 +758,10 @@ execute_sql_string(const char *sql)
CommandCounterIncrement();
stmt_list = pg_analyze_and_rewrite_fixedparams(parsetree,
- sql,
- NULL,
- 0,
- NULL);
+ sql,
+ NULL,
+ 0,
+ NULL);
stmt_list = pg_plan_queries(stmt_list, sql, CURSOR_OPT_PARALLEL_OK, NULL);
foreach(lc2, stmt_list)
diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c
index 52534f18274..d1ee1064652 100644
--- a/src/backend/commands/matview.c
+++ b/src/backend/commands/matview.c
@@ -332,8 +332,8 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString,
/*
* Inform cumulative stats system about our activity: basically, we
* truncated the matview and inserted some new data. (The concurrent
- * code path above doesn't need to worry about this because the inserts
- * and deletes it issues get counted by lower-level code.)
+ * code path above doesn't need to worry about this because the
+ * inserts and deletes it issues get counted by lower-level code.)
*/
pgstat_count_truncate(matviewRel);
if (!stmt->skipData)
diff --git a/src/backend/commands/publicationcmds.c b/src/backend/commands/publicationcmds.c
index 6df0e6670fd..8e645741e4e 100644
--- a/src/backend/commands/publicationcmds.c
+++ b/src/backend/commands/publicationcmds.c
@@ -297,7 +297,7 @@ contain_invalid_rfcolumn_walker(Node *node, rf_context *context)
*/
bool
pub_rf_contains_invalid_column(Oid pubid, Relation relation, List *ancestors,
- bool pubviaroot)
+ bool pubviaroot)
{
HeapTuple rftuple;
Oid relid = RelationGetRelid(relation);
@@ -373,7 +373,7 @@ pub_rf_contains_invalid_column(Oid pubid, Relation relation, List *ancestors,
*/
bool
pub_collist_contains_invalid_column(Oid pubid, Relation relation, List *ancestors,
- bool pubviaroot)
+ bool pubviaroot)
{
HeapTuple tuple;
Oid relid = RelationGetRelid(relation);
@@ -384,8 +384,8 @@ pub_collist_contains_invalid_column(Oid pubid, Relation relation, List *ancestor
/*
* For a partition, if pubviaroot is true, find the topmost ancestor that
- * is published via this publication as we need to use its column list
- * for the changes.
+ * is published via this publication as we need to use its column list for
+ * the changes.
*
* Note that even though the column list used is for an ancestor, the
* REPLICA IDENTITY used will be for the actual child table.
@@ -399,19 +399,19 @@ pub_collist_contains_invalid_column(Oid pubid, Relation relation, List *ancestor
}
tuple = SearchSysCache2(PUBLICATIONRELMAP,
- ObjectIdGetDatum(publish_as_relid),
- ObjectIdGetDatum(pubid));
+ ObjectIdGetDatum(publish_as_relid),
+ ObjectIdGetDatum(pubid));
if (!HeapTupleIsValid(tuple))
return false;
datum = SysCacheGetAttr(PUBLICATIONRELMAP, tuple,
- Anum_pg_publication_rel_prattrs,
- &isnull);
+ Anum_pg_publication_rel_prattrs,
+ &isnull);
if (!isnull)
{
- int x;
+ int x;
Bitmapset *idattrs;
Bitmapset *columns = NULL;
@@ -429,8 +429,9 @@ pub_collist_contains_invalid_column(Oid pubid, Relation relation, List *ancestor
/*
* Attnums in the bitmap returned by RelationGetIndexAttrBitmap are
* offset (to handle system columns the usual way), while column list
- * does not use offset, so we can't do bms_is_subset(). Instead, we have
- * to loop over the idattrs and check all of them are in the list.
+ * does not use offset, so we can't do bms_is_subset(). Instead, we
+ * have to loop over the idattrs and check all of them are in the
+ * list.
*/
x = -1;
while ((x = bms_next_member(idattrs, x)) >= 0)
@@ -440,14 +441,14 @@ pub_collist_contains_invalid_column(Oid pubid, Relation relation, List *ancestor
/*
* If pubviaroot is true, we are validating the column list of the
* parent table, but the bitmap contains the replica identity
- * information of the child table. The parent/child attnums may not
- * match, so translate them to the parent - get the attname from
- * the child, and look it up in the parent.
+ * information of the child table. The parent/child attnums may
+ * not match, so translate them to the parent - get the attname
+ * from the child, and look it up in the parent.
*/
if (pubviaroot)
{
/* attribute name in the child table */
- char *colname = get_attname(relid, attnum, false);
+ char *colname = get_attname(relid, attnum, false);
/*
* Determine the attnum for the attribute name in parent (we
@@ -720,7 +721,7 @@ TransformPubWhereClauses(List *tables, const char *queryString,
*/
static void
CheckPubRelationColumnList(List *tables, const char *queryString,
- bool pubviaroot)
+ bool pubviaroot)
{
ListCell *lc;
@@ -864,7 +865,7 @@ CreatePublication(ParseState *pstate, CreatePublicationStmt *stmt)
publish_via_partition_root);
CheckPubRelationColumnList(rels, pstate->p_sourcetext,
- publish_via_partition_root);
+ publish_via_partition_root);
PublicationAddTables(puboid, rels, true, NULL);
CloseTableList(rels);
@@ -1198,8 +1199,8 @@ AlterPublicationTables(AlterPublicationStmt *stmt, HeapTuple tup,
/* Transform the int2vector column list to a bitmap. */
columnListDatum = SysCacheGetAttr(PUBLICATIONRELMAP, rftuple,
- Anum_pg_publication_rel_prattrs,
- &isnull);
+ Anum_pg_publication_rel_prattrs,
+ &isnull);
if (!isnull)
oldcolumns = pub_collist_to_bitmapset(NULL, columnListDatum, NULL);
@@ -1210,15 +1211,15 @@ AlterPublicationTables(AlterPublicationStmt *stmt, HeapTuple tup,
foreach(newlc, rels)
{
PublicationRelInfo *newpubrel;
- Oid newrelid;
- Bitmapset *newcolumns = NULL;
+ Oid newrelid;
+ Bitmapset *newcolumns = NULL;
newpubrel = (PublicationRelInfo *) lfirst(newlc);
newrelid = RelationGetRelid(newpubrel->relation);
/*
- * If the new publication has column list, transform it to
- * a bitmap too.
+ * If the new publication has a column list, transform it to a
+ * bitmap too.
*/
if (newpubrel->columns)
{
diff --git a/src/backend/commands/statscmds.c b/src/backend/commands/statscmds.c
index 54a190722df..2e716743dd6 100644
--- a/src/backend/commands/statscmds.c
+++ b/src/backend/commands/statscmds.c
@@ -258,9 +258,9 @@ CreateStatistics(CreateStatsStmt *stmt)
nattnums++;
ReleaseSysCache(atttuple);
}
- else if (IsA(selem->expr, Var)) /* column reference in parens */
+ else if (IsA(selem->expr, Var)) /* column reference in parens */
{
- Var *var = (Var *) selem->expr;
+ Var *var = (Var *) selem->expr;
TypeCacheEntry *type;
/* Disallow use of system attributes in extended stats */
@@ -297,10 +297,11 @@ CreateStatistics(CreateStatsStmt *stmt)
while ((k = bms_next_member(attnums, k)) >= 0)
{
AttrNumber attnum = k + FirstLowInvalidHeapAttributeNumber;
+
if (attnum <= 0)
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("statistics creation on system columns is not supported")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("statistics creation on system columns is not supported")));
}
/*
@@ -511,9 +512,9 @@ CreateStatistics(CreateStatsStmt *stmt)
relation_close(statrel, RowExclusiveLock);
/*
- * We used to create the pg_statistic_ext_data tuple too, but it's not clear
- * what value should the stxdinherit flag have (it depends on whether the rel
- * is partitioned, contains data, etc.)
+ * We used to create the pg_statistic_ext_data tuple too, but it's not
+ * clear what value the stxdinherit flag should have (it depends on
+ * whether the rel is partitioned, contains data, etc.)
*/
InvokeObjectPostCreateHook(StatisticExtRelationId, statoid, 0);
diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c
index b94236f74d3..690cdaa426e 100644
--- a/src/backend/commands/subscriptioncmds.c
+++ b/src/backend/commands/subscriptioncmds.c
@@ -1578,13 +1578,13 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
PG_END_TRY();
/*
- * Tell the cumulative stats system that the subscription is getting dropped.
- * We can safely report dropping the subscription statistics here if the
- * subscription is associated with a replication slot since we cannot run
- * DROP SUBSCRIPTION inside a transaction block. Subscription statistics
- * will be removed later by (auto)vacuum either if it's not associated
- * with a replication slot or if the message for dropping the subscription
- * gets lost.
+ * Tell the cumulative stats system that the subscription is getting
+ * dropped. We can safely report dropping the subscription statistics here
+ * if the subscription is associated with a replication slot since we
+ * cannot run DROP SUBSCRIPTION inside a transaction block. Subscription
+ * statistics will be removed later by (auto)vacuum either if it's not
+ * associated with a replication slot or if the message for dropping the
+ * subscription gets lost.
*/
if (slotname)
pgstat_drop_subscription(subid);
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 2cd8546d471..2de0ebacec3 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -495,8 +495,8 @@ static ObjectAddress addFkRecurseReferenced(List **wqueue, Constraint *fkconstra
bool old_check_ok,
Oid parentDelTrigger, Oid parentUpdTrigger);
static void validateFkOnDeleteSetColumns(int numfks, const int16 *fkattnums,
- int numfksetcols, const int16 *fksetcolsattnums,
- List *fksetcols);
+ int numfksetcols, const int16 *fksetcolsattnums,
+ List *fksetcols);
static void addFkRecurseReferencing(List **wqueue, Constraint *fkconstraint,
Relation rel, Relation pkrel, Oid indexOid, Oid parentConstr,
int numfks, int16 *pkattnum, int16 *fkattnum,
@@ -5579,7 +5579,7 @@ ATRewriteTables(AlterTableStmt *parsetree, List **wqueue, LOCKMODE lockmode,
foreach(lc, seqlist)
{
- Oid seq_relid = lfirst_oid(lc);
+ Oid seq_relid = lfirst_oid(lc);
SequenceChangePersistence(seq_relid, tab->newrelpersistence);
}
@@ -9448,8 +9448,8 @@ validateFkOnDeleteSetColumns(int numfks, const int16 *fkattnums,
{
for (int i = 0; i < numfksetcols; i++)
{
- int16 setcol_attnum = fksetcolsattnums[i];
- bool seen = false;
+ int16 setcol_attnum = fksetcolsattnums[i];
+ bool seen = false;
for (int j = 0; j < numfks; j++)
{
@@ -9462,7 +9462,8 @@ validateFkOnDeleteSetColumns(int numfks, const int16 *fkattnums,
if (!seen)
{
- char *col = strVal(list_nth(fksetcols, i));
+ char *col = strVal(list_nth(fksetcols, i));
+
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
errmsg("column \"%s\" referenced in ON DELETE SET action must be part of foreign key", col)));
@@ -15890,6 +15891,7 @@ relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid,
CatalogTupleUpdate(pg_index, &pg_index_tuple->t_self, pg_index_tuple);
InvokeObjectPostAlterHookArg(IndexRelationId, thisIndexOid, 0,
InvalidOid, is_internal);
+
/*
* Invalidate the relcache for the table, so that after we commit
* all sessions will refresh the table's replica identity index
@@ -17931,12 +17933,12 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd,
/*
* If the partition we just attached is partitioned itself, invalidate
* relcache for all descendent partitions too to ensure that their
- * rd_partcheck expression trees are rebuilt; partitions already locked
- * at the beginning of this function.
+ * rd_partcheck expression trees are rebuilt; partitions already locked at
+ * the beginning of this function.
*/
if (attachrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
{
- ListCell *l;
+ ListCell *l;
foreach(l, attachrel_children)
{
@@ -18652,13 +18654,13 @@ DetachPartitionFinalize(Relation rel, Relation partRel, bool concurrent,
/*
* If the partition we just detached is partitioned itself, invalidate
* relcache for all descendent partitions too to ensure that their
- * rd_partcheck expression trees are rebuilt; must lock partitions
- * before doing so, using the same lockmode as what partRel has been
- * locked with by the caller.
+ * rd_partcheck expression trees are rebuilt; must lock partitions before
+ * doing so, using the same lockmode as what partRel has been locked with
+ * by the caller.
*/
if (partRel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
{
- List *children;
+ List *children;
children = find_all_inheritors(RelationGetRelid(partRel),
AccessExclusiveLock, NULL);
diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c
index 822d65287ef..690f05f6620 100644
--- a/src/backend/commands/tablespace.c
+++ b/src/backend/commands/tablespace.c
@@ -89,7 +89,7 @@ char *default_tablespace = NULL;
char *temp_tablespaces = NULL;
bool allow_in_place_tablespaces = false;
-Oid binary_upgrade_next_pg_tablespace_oid = InvalidOid;
+Oid binary_upgrade_next_pg_tablespace_oid = InvalidOid;
static void create_tablespace_directories(const char *location,
const Oid tablespaceoid);
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index c263f6c8b9f..984305ba31c 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -798,11 +798,11 @@ AlterRole(ParseState *pstate, AlterRoleStmt *stmt)
*/
if (drolemembers)
{
- List *rolemembers = (List *) drolemembers->arg;
+ List *rolemembers = (List *) drolemembers->arg;
CommandCounterIncrement();
- if (stmt->action == +1) /* add members to role */
+ if (stmt->action == +1) /* add members to role */
AddRoleMems(rolename, roleid,
rolemembers, roleSpecsToIds(rolemembers),
GetUserId(), false);
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index e0fc7e8d794..8df25f59d87 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -1409,7 +1409,7 @@ vac_update_relstats(Relation relation,
*frozenxid_updated = false;
if (TransactionIdIsNormal(frozenxid) && oldfrozenxid != frozenxid)
{
- bool update = false;
+ bool update = false;
if (TransactionIdPrecedes(oldfrozenxid, frozenxid))
update = true;
@@ -1432,7 +1432,7 @@ vac_update_relstats(Relation relation,
*minmulti_updated = false;
if (MultiXactIdIsValid(minmulti) && oldminmulti != minmulti)
{
- bool update = false;
+ bool update = false;
if (MultiXactIdPrecedes(oldminmulti, minmulti))
update = true;
diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c
index bbf3b69c57e..1753da6c830 100644
--- a/src/backend/commands/vacuumparallel.c
+++ b/src/backend/commands/vacuumparallel.c
@@ -112,7 +112,7 @@ typedef enum PVIndVacStatus
PARALLEL_INDVAC_STATUS_NEED_BULKDELETE,
PARALLEL_INDVAC_STATUS_NEED_CLEANUP,
PARALLEL_INDVAC_STATUS_COMPLETED
-} PVIndVacStatus;
+} PVIndVacStatus;
/*
* Struct for index vacuum statistics of an index that is used for parallel vacuum.
diff --git a/src/backend/executor/execExpr.c b/src/backend/executor/execExpr.c
index 38b94c02767..2831e7978b5 100644
--- a/src/backend/executor/execExpr.c
+++ b/src/backend/executor/execExpr.c
@@ -2504,7 +2504,7 @@ ExecInitExprRec(Expr *node, ExprState *state,
if (ctor->type == JSCTOR_JSON_SCALAR)
{
bool is_jsonb =
- ctor->returning->format->format_type == JS_FORMAT_JSONB;
+ ctor->returning->format->format_type == JS_FORMAT_JSONB;
scratch.d.json_constructor.arg_type_cache =
palloc(sizeof(*scratch.d.json_constructor.arg_type_cache) * nargs);
@@ -2666,7 +2666,7 @@ ExecInitExprRec(Expr *node, ExprState *state,
{
cstate->coercion = *coercion;
cstate->estate = *coercion ?
- ExecInitExprWithCaseValue((Expr *)(*coercion)->expr,
+ ExecInitExprWithCaseValue((Expr *) (*coercion)->expr,
state->parent,
caseval, casenull) : NULL;
}
diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c
index 3b1c045c52f..e024611aa54 100644
--- a/src/backend/executor/execExprInterp.c
+++ b/src/backend/executor/execExprInterp.c
@@ -3978,8 +3978,8 @@ ExecEvalJsonIsPredicate(ExprState *state, ExprEvalStep *op)
}
/*
- * Do full parsing pass only for uniqueness check or for
- * JSON text validation.
+ * Do full parsing pass only for uniqueness check or for JSON text
+ * validation.
*/
if (res && (pred->unique_keys || exprtype == TEXTOID))
res = json_validate(json, pred->unique_keys, false);
@@ -4513,20 +4513,20 @@ ExecEvalJsonConstructor(ExprState *state, ExprEvalStep *op,
if (ctor->type == JSCTOR_JSON_ARRAY)
res = (is_jsonb ?
jsonb_build_array_worker :
- json_build_array_worker)(op->d.json_constructor.nargs,
- op->d.json_constructor.arg_values,
- op->d.json_constructor.arg_nulls,
- op->d.json_constructor.arg_types,
- op->d.json_constructor.constructor->absent_on_null);
- else if (ctor->type == JSCTOR_JSON_OBJECT)
- res = (is_jsonb ?
- jsonb_build_object_worker :
- json_build_object_worker)(op->d.json_constructor.nargs,
+ json_build_array_worker) (op->d.json_constructor.nargs,
op->d.json_constructor.arg_values,
op->d.json_constructor.arg_nulls,
op->d.json_constructor.arg_types,
- op->d.json_constructor.constructor->absent_on_null,
- op->d.json_constructor.constructor->unique);
+ op->d.json_constructor.constructor->absent_on_null);
+ else if (ctor->type == JSCTOR_JSON_OBJECT)
+ res = (is_jsonb ?
+ jsonb_build_object_worker :
+ json_build_object_worker) (op->d.json_constructor.nargs,
+ op->d.json_constructor.arg_values,
+ op->d.json_constructor.arg_nulls,
+ op->d.json_constructor.arg_types,
+ op->d.json_constructor.constructor->absent_on_null,
+ op->d.json_constructor.constructor->unique);
else if (ctor->type == JSCTOR_JSON_SCALAR)
{
if (op->d.json_constructor.arg_nulls[0])
@@ -4622,9 +4622,9 @@ static Datum
ExecEvalJsonExprCoercion(ExprEvalStep *op, ExprContext *econtext,
Datum res, bool *isNull, void *p, bool *error)
{
- ExprState *estate = p;
+ ExprState *estate = p;
- if (estate) /* coerce using specified expression */
+ if (estate) /* coerce using specified expression */
return ExecEvalExpr(estate, econtext, isNull);
if (op->d.jsonexpr.jsexpr->op != JSON_EXISTS_OP)
@@ -4696,7 +4696,7 @@ EvalJsonPathVar(void *cxt, char *varName, int varNameLen,
if (!var->evaluated)
{
MemoryContext oldcxt = var->mcxt ?
- MemoryContextSwitchTo(var->mcxt) : NULL;
+ MemoryContextSwitchTo(var->mcxt) : NULL;
var->value = ExecEvalExpr(var->estate, var->econtext, &var->isnull);
var->evaluated = true;
@@ -4751,9 +4751,8 @@ ExecPrepareJsonItemCoercion(JsonbValue *item,
case jbvString:
coercion = &coercions->string;
- res = PointerGetDatum(
- cstring_to_text_with_len(item->val.string.val,
- item->val.string.len));
+ res = PointerGetDatum(cstring_to_text_with_len(item->val.string.val,
+ item->val.string.len));
break;
case jbvNumeric:
@@ -4809,8 +4808,8 @@ ExecPrepareJsonItemCoercion(JsonbValue *item,
return res;
}
-typedef Datum (*JsonFunc)(ExprEvalStep *op, ExprContext *econtext,
- Datum item, bool *resnull, void *p, bool *error);
+typedef Datum (*JsonFunc) (ExprEvalStep *op, ExprContext *econtext,
+ Datum item, bool *resnull, void *p, bool *error);
static Datum
ExecEvalJsonExprSubtrans(JsonFunc func, ExprEvalStep *op,
@@ -4826,8 +4825,8 @@ ExecEvalJsonExprSubtrans(JsonFunc func, ExprEvalStep *op,
return func(op, econtext, res, resnull, p, error);
/*
- * We should catch exceptions of category ERRCODE_DATA_EXCEPTION
- * and execute the corresponding ON ERROR behavior then.
+ * We should catch exceptions of category ERRCODE_DATA_EXCEPTION and
+ * execute the corresponding ON ERROR behavior then.
*/
oldcontext = CurrentMemoryContext;
oldowner = CurrentResourceOwner;
@@ -4864,7 +4863,8 @@ ExecEvalJsonExprSubtrans(JsonFunc func, ExprEvalStep *op,
ecategory = ERRCODE_TO_CATEGORY(edata->sqlerrcode);
- if (ecategory != ERRCODE_DATA_EXCEPTION && /* jsonpath and other data errors */
+ if (ecategory != ERRCODE_DATA_EXCEPTION && /* jsonpath and other data
+ * errors */
ecategory != ERRCODE_INTEGRITY_CONSTRAINT_VIOLATION) /* domain errors */
ReThrowError(edata);
@@ -4918,7 +4918,7 @@ ExecEvalJsonExpr(ExprEvalStep *op, ExprContext *econtext,
if (error && *error)
return (Datum) 0;
- if (!jbv) /* NULL or empty */
+ if (!jbv) /* NULL or empty */
break;
Assert(!empty);
@@ -4949,21 +4949,23 @@ ExecEvalJsonExpr(ExprEvalStep *op, ExprContext *econtext,
*error = true;
return (Datum) 0;
}
+
/*
* Coercion via I/O means here that the cast to the target
* type simply does not exist.
*/
ereport(ERROR,
- /*
- * XXX Standard says about a separate error code
- * ERRCODE_SQL_JSON_ITEM_CANNOT_BE_CAST_TO_TARGET_TYPE
- * but does not define its number.
- */
+
+ /*
+ * XXX Standard says about a separate error code
+ * ERRCODE_SQL_JSON_ITEM_CANNOT_BE_CAST_TO_TARGET_TYPE but
+ * does not define its number.
+ */
(errcode(ERRCODE_SQL_JSON_SCALAR_REQUIRED),
errmsg("SQL/JSON item cannot be cast to target type")));
}
else if (!jcstate->estate)
- return res; /* no coercion */
+ return res; /* no coercion */
/* coerce using specific expression */
estate = jcstate->estate;
@@ -5018,6 +5020,7 @@ ExecEvalJsonExpr(ExprEvalStep *op, ExprContext *econtext,
}
if (jexpr->on_empty->btype == JSON_BEHAVIOR_DEFAULT)
+
/*
* Execute DEFAULT expression as a coercion expression, because
* its result is already coerced to the target type.
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index 90b2699a96b..5ef5c6930fd 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -575,6 +575,7 @@ ExecReScanIndexScan(IndexScanState *node)
if (node->iss_ReorderQueue)
{
HeapTuple tuple;
+
while (!pairingheap_is_empty(node->iss_ReorderQueue))
{
tuple = reorderqueue_pop(node);
diff --git a/src/backend/executor/nodeMemoize.c b/src/backend/executor/nodeMemoize.c
index 23441e33cad..f7be4fc31f7 100644
--- a/src/backend/executor/nodeMemoize.c
+++ b/src/backend/executor/nodeMemoize.c
@@ -375,7 +375,7 @@ static void
cache_purge_all(MemoizeState *mstate)
{
uint64 evictions = mstate->hashtable->members;
- PlanState *pstate = (PlanState *) mstate;
+ PlanState *pstate = (PlanState *) mstate;
/*
* Likely the most efficient way to remove all items is to just reset the
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 982acfdad98..a49c3da5b6c 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -831,7 +831,7 @@ ExecInsert(ModifyTableContext *context,
{
TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
TupleDesc plan_tdesc =
- CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
+ CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 042a5f8b0a2..29bc26669b0 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -2267,10 +2267,10 @@ _SPI_prepare_plan(const char *src, SPIPlanPtr plan)
else
{
stmt_list = pg_analyze_and_rewrite_fixedparams(parsetree,
- src,
- plan->argtypes,
- plan->nargs,
- _SPI_current->queryEnv);
+ src,
+ plan->argtypes,
+ plan->nargs,
+ _SPI_current->queryEnv);
}
/* Finish filling in the CachedPlanSource */
@@ -2504,10 +2504,10 @@ _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options,
else
{
stmt_list = pg_analyze_and_rewrite_fixedparams(parsetree,
- src,
- plan->argtypes,
- plan->nargs,
- _SPI_current->queryEnv);
+ src,
+ plan->argtypes,
+ plan->nargs,
+ _SPI_current->queryEnv);
}
/* Finish filling in the CachedPlanSource */
diff --git a/src/backend/jit/llvm/llvmjit.c b/src/backend/jit/llvm/llvmjit.c
index fcd63218f28..6c72d43beb6 100644
--- a/src/backend/jit/llvm/llvmjit.c
+++ b/src/backend/jit/llvm/llvmjit.c
@@ -890,8 +890,8 @@ llvm_shutdown(int code, Datum arg)
* has occurred in the middle of LLVM code. It is not safe to call back
* into LLVM (which is why a FATAL error was thrown).
*
- * We do need to shutdown LLVM in other shutdown cases, otherwise
- * e.g. profiling data won't be written out.
+ * We do need to shutdown LLVM in other shutdown cases, otherwise e.g.
+ * profiling data won't be written out.
*/
if (llvm_in_fatal_on_oom())
{
diff --git a/src/backend/lib/dshash.c b/src/backend/lib/dshash.c
index 1b94a76e43e..ec454b4d655 100644
--- a/src/backend/lib/dshash.c
+++ b/src/backend/lib/dshash.c
@@ -634,9 +634,9 @@ dshash_seq_next(dshash_seq_status *status)
/*
* Not yet holding any partition locks. Need to determine the size of the
- * hash table, it could have been resized since we were looking
- * last. Since we iterate in partition order, we can start by
- * unconditionally lock partition 0.
+ * hash table, it could have been resized since we were looking last.
+ * Since we iterate in partition order, we can start by unconditionally
+ * locking partition 0.
*
* Once we hold the lock, no resizing can happen until the scan ends. So
* we don't need to repeatedly call ensure_valid_bucket_pointers().
diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c
index 03cdc72b406..75392a8bb7c 100644
--- a/src/backend/libpq/pqcomm.c
+++ b/src/backend/libpq/pqcomm.c
@@ -1967,8 +1967,8 @@ retry:
* because no code should expect latches to survive across
* CHECK_FOR_INTERRUPTS().
*/
- ResetLatch(MyLatch);
- goto retry;
+ ResetLatch(MyLatch);
+ goto retry;
}
}
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index 836f427ea8f..205506305b0 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -2343,7 +2343,7 @@ _copyJsonReturning(const JsonReturning *from)
static JsonValueExpr *
_copyJsonValueExpr(const JsonValueExpr *from)
{
- JsonValueExpr *newnode = makeNode(JsonValueExpr);
+ JsonValueExpr *newnode = makeNode(JsonValueExpr);
COPY_NODE_FIELD(raw_expr);
COPY_NODE_FIELD(formatted_expr);
@@ -2358,7 +2358,7 @@ _copyJsonValueExpr(const JsonValueExpr *from)
static JsonParseExpr *
_copyJsonParseExpr(const JsonParseExpr *from)
{
- JsonParseExpr *newnode = makeNode(JsonParseExpr);
+ JsonParseExpr *newnode = makeNode(JsonParseExpr);
COPY_NODE_FIELD(expr);
COPY_NODE_FIELD(output);
@@ -2488,7 +2488,7 @@ _copyJsonObjectAgg(const JsonObjectAgg *from)
static JsonOutput *
_copyJsonOutput(const JsonOutput *from)
{
- JsonOutput *newnode = makeNode(JsonOutput);
+ JsonOutput *newnode = makeNode(JsonOutput);
COPY_NODE_FIELD(typeName);
COPY_NODE_FIELD(returning);
@@ -2550,7 +2550,7 @@ _copyJsonArrayQueryConstructor(const JsonArrayQueryConstructor *from)
static JsonExpr *
_copyJsonExpr(const JsonExpr *from)
{
- JsonExpr *newnode = makeNode(JsonExpr);
+ JsonExpr *newnode = makeNode(JsonExpr);
COPY_SCALAR_FIELD(op);
COPY_NODE_FIELD(formatted_expr);
@@ -2614,7 +2614,7 @@ _copyJsonItemCoercions(const JsonItemCoercions *from)
static JsonFuncExpr *
_copyJsonFuncExpr(const JsonFuncExpr *from)
{
- JsonFuncExpr *newnode = makeNode(JsonFuncExpr);
+ JsonFuncExpr *newnode = makeNode(JsonFuncExpr);
COPY_SCALAR_FIELD(op);
COPY_NODE_FIELD(common);
@@ -2651,7 +2651,7 @@ _copyJsonIsPredicate(const JsonIsPredicate *from)
static JsonBehavior *
_copyJsonBehavior(const JsonBehavior *from)
{
- JsonBehavior *newnode = makeNode(JsonBehavior);
+ JsonBehavior *newnode = makeNode(JsonBehavior);
COPY_SCALAR_FIELD(btype);
COPY_NODE_FIELD(default_expr);
@@ -2665,7 +2665,7 @@ _copyJsonBehavior(const JsonBehavior *from)
static JsonCommon *
_copyJsonCommon(const JsonCommon *from)
{
- JsonCommon *newnode = makeNode(JsonCommon);
+ JsonCommon *newnode = makeNode(JsonCommon);
COPY_NODE_FIELD(expr);
COPY_NODE_FIELD(pathspec);
@@ -2682,7 +2682,7 @@ _copyJsonCommon(const JsonCommon *from)
static JsonArgument *
_copyJsonArgument(const JsonArgument *from)
{
- JsonArgument *newnode = makeNode(JsonArgument);
+ JsonArgument *newnode = makeNode(JsonArgument);
COPY_NODE_FIELD(val);
COPY_STRING_FIELD(name);
@@ -2696,7 +2696,7 @@ _copyJsonArgument(const JsonArgument *from)
static JsonTable *
_copyJsonTable(const JsonTable *from)
{
- JsonTable *newnode = makeNode(JsonTable);
+ JsonTable *newnode = makeNode(JsonTable);
COPY_NODE_FIELD(common);
COPY_NODE_FIELD(columns);
@@ -5480,7 +5480,7 @@ _copyExtensibleNode(const ExtensibleNode *from)
static Integer *
_copyInteger(const Integer *from)
{
- Integer *newnode = makeNode(Integer);
+ Integer *newnode = makeNode(Integer);
COPY_SCALAR_FIELD(ival);
@@ -5500,7 +5500,7 @@ _copyFloat(const Float *from)
static Boolean *
_copyBoolean(const Boolean *from)
{
- Boolean *newnode = makeNode(Boolean);
+ Boolean *newnode = makeNode(Boolean);
COPY_SCALAR_FIELD(boolval);
@@ -5520,7 +5520,7 @@ _copyString(const String *from)
static BitString *
_copyBitString(const BitString *from)
{
- BitString *newnode = makeNode(BitString);
+ BitString *newnode = makeNode(BitString);
COPY_STRING_FIELD(bsval);
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index e013c1bbfed..9688b22a4b9 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -2802,8 +2802,7 @@ static bool
_equalA_Const(const A_Const *a, const A_Const *b)
{
/*
- * Hack for in-line val field. Also val is not valid is isnull is
- * true.
+ * Hack for in-line val field. Also val is not valid if isnull is true.
*/
if (!a->isnull && !b->isnull &&
!equal(&a->val, &b->val))
diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c
index 4ae5e5d4dd6..3b3ef3a4cdd 100644
--- a/src/backend/nodes/nodeFuncs.c
+++ b/src/backend/nodes/nodeFuncs.c
@@ -1003,7 +1003,7 @@ exprCollation(const Node *expr)
break;
case T_JsonExpr:
{
- JsonExpr *jexpr = (JsonExpr *) expr;
+ JsonExpr *jexpr = (JsonExpr *) expr;
JsonCoercion *coercion = jexpr->result_coercion;
if (!coercion)
@@ -1239,7 +1239,8 @@ exprSetCollation(Node *expr, Oid collation)
if (ctor->coercion)
exprSetCollation((Node *) ctor->coercion, collation);
else
- Assert(!OidIsValid(collation)); /* result is always a json[b] type */
+ Assert(!OidIsValid(collation)); /* result is always a
+ * json[b] type */
}
break;
case T_JsonIsPredicate:
@@ -1247,7 +1248,7 @@ exprSetCollation(Node *expr, Oid collation)
break;
case T_JsonExpr:
{
- JsonExpr *jexpr = (JsonExpr *) expr;
+ JsonExpr *jexpr = (JsonExpr *) expr;
JsonCoercion *coercion = jexpr->result_coercion;
if (!coercion)
@@ -2496,7 +2497,7 @@ expression_tree_walker(Node *node,
return walker(((JsonIsPredicate *) node)->expr, context);
case T_JsonExpr:
{
- JsonExpr *jexpr = (JsonExpr *) node;
+ JsonExpr *jexpr = (JsonExpr *) node;
if (walker(jexpr->formatted_expr, context))
return true;
@@ -3568,8 +3569,8 @@ expression_tree_mutator(Node *node,
break;
case T_JsonExpr:
{
- JsonExpr *jexpr = (JsonExpr *) node;
- JsonExpr *newnode;
+ JsonExpr *jexpr = (JsonExpr *) node;
+ JsonExpr *newnode;
FLATCOPY(newnode, jexpr, JsonExpr);
MUTATE(newnode->path_spec, jexpr->path_spec, Node *);
@@ -4545,7 +4546,7 @@ raw_expression_tree_walker(Node *node,
break;
case T_JsonTableColumn:
{
- JsonTableColumn *jtc = (JsonTableColumn *) node;
+ JsonTableColumn *jtc = (JsonTableColumn *) node;
if (walker(jtc->typeName, context))
return true;
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index b1f2de8b28d..0271ea9d786 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -3613,8 +3613,8 @@ static void
_outFloat(StringInfo str, const Float *node)
{
/*
- * We assume the value is a valid numeric literal and so does not
- * need quoting.
+ * We assume the value is a valid numeric literal and so does not need
+ * quoting.
*/
appendStringInfoString(str, node->fval);
}
@@ -3629,8 +3629,8 @@ static void
_outString(StringInfo str, const String *node)
{
/*
- * We use outToken to provide escaping of the string's content,
- * but we don't want it to do anything with an empty string.
+ * We use outToken to provide escaping of the string's content, but we
+ * don't want it to do anything with an empty string.
*/
appendStringInfoChar(str, '"');
if (node->sval[0] != '\0')
diff --git a/src/backend/nodes/value.c b/src/backend/nodes/value.c
index 6fe55f5dd5c..5774a686706 100644
--- a/src/backend/nodes/value.c
+++ b/src/backend/nodes/value.c
@@ -22,7 +22,7 @@
Integer *
makeInteger(int i)
{
- Integer *v = makeNode(Integer);
+ Integer *v = makeNode(Integer);
v->ival = i;
return v;
@@ -48,7 +48,7 @@ makeFloat(char *numericStr)
Boolean *
makeBoolean(bool val)
{
- Boolean *v = makeNode(Boolean);
+ Boolean *v = makeNode(Boolean);
v->boolval = val;
return v;
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index d84f66a81b3..7ac116a791f 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -1777,17 +1777,18 @@ generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
}
/*
- * When building a fractional path, determine a cheapest fractional
- * path for each child relation too. Looking at startup and total
- * costs is not enough, because the cheapest fractional path may be
- * dominated by two separate paths (one for startup, one for total).
+ * When building a fractional path, determine a cheapest
+ * fractional path for each child relation too. Looking at startup
+ * and total costs is not enough, because the cheapest fractional
+ * path may be dominated by two separate paths (one for startup,
+ * one for total).
*
* When needed (building fractional path), determine the cheapest
* fractional path too.
*/
if (root->tuple_fraction > 0)
{
- double path_fraction = (1.0 / root->tuple_fraction);
+ double path_fraction = (1.0 / root->tuple_fraction);
cheapest_fractional =
get_cheapest_fractional_path_for_pathkeys(childrel->pathlist,
@@ -1796,8 +1797,8 @@ generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
path_fraction);
/*
- * If we found no path with matching pathkeys, use the cheapest
- * total path instead.
+ * If we found no path with matching pathkeys, use the
+ * cheapest total path instead.
*
* XXX We might consider partially sorted paths too (with an
* incremental sort on top). But we'd have to build all the
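The hunk above distinguishes a cheapest *fractional* path from the cheapest-startup and cheapest-total paths: for a query that only needs the first tuple_fraction of the rows, a third path can win. A minimal standalone C sketch of that idea, assuming the usual linear interpolation between startup and total cost (DemoPath, the path names and the numbers are invented for illustration and are not planner code):

#include <stdio.h>

typedef struct
{
    const char *name;
    double      startup_cost;
    double      total_cost;
} DemoPath;

/* cost to fetch the first 'fraction' of the rows, interpolated linearly */
static double
fractional_cost(const DemoPath *p, double fraction)
{
    return p->startup_cost + fraction * (p->total_cost - p->startup_cost);
}

int
main(void)
{
    DemoPath    sorted = {"sort-whole-input", 120.0, 150.0};   /* cheapest total */
    DemoPath    indexed = {"index-ordered", 1.0, 400.0};       /* cheapest startup */
    double      fraction = 0.05;        /* caller wants ~5% of the rows */

    printf("%s: %.2f\n", sorted.name, fractional_cost(&sorted, fraction));
    printf("%s: %.2f\n", indexed.name, fractional_cost(&indexed, fraction));
    /* the index-ordered path wins at 5% despite its larger total cost */
    return 0;
}

With a small fraction the low-startup path wins even though its total cost is higher, which is why the code above cannot simply reuse the cheapest startup or total path for each child relation.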
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 6673d271c26..ed98ba7dbd2 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -1794,7 +1794,7 @@ is_fake_var(Expr *expr)
static double
get_width_cost_multiplier(PlannerInfo *root, Expr *expr)
{
- double width = -1.0; /* fake value */
+ double width = -1.0; /* fake value */
if (IsA(expr, RelabelType))
expr = (Expr *) ((RelabelType *) expr)->arg;
@@ -1802,17 +1802,17 @@ get_width_cost_multiplier(PlannerInfo *root, Expr *expr)
/* Try to find actual stat in corresponding relation */
if (IsA(expr, Var))
{
- Var *var = (Var *) expr;
+ Var *var = (Var *) expr;
if (var->varno > 0 && var->varno < root->simple_rel_array_size)
{
- RelOptInfo *rel = root->simple_rel_array[var->varno];
+ RelOptInfo *rel = root->simple_rel_array[var->varno];
if (rel != NULL &&
var->varattno >= rel->min_attr &&
var->varattno <= rel->max_attr)
{
- int ndx = var->varattno - rel->min_attr;
+ int ndx = var->varattno - rel->min_attr;
if (rel->attr_widths[ndx] > 0)
width = rel->attr_widths[ndx];
@@ -1823,7 +1823,7 @@ get_width_cost_multiplier(PlannerInfo *root, Expr *expr)
/* Didn't find any actual stats, try using type width instead. */
if (width < 0.0)
{
- Node *node = (Node*) expr;
+ Node *node = (Node *) expr;
width = get_typavgwidth(exprType(node), exprTypmod(node));
}
@@ -1832,17 +1832,17 @@ get_width_cost_multiplier(PlannerInfo *root, Expr *expr)
* Values are passed as Datum type, so comparisons can't be cheaper than
* comparing a Datum value.
*
- * FIXME I find this reasoning questionable. We may pass int2, and comparing
- * it is probably a bit cheaper than comparing a bigint.
+ * FIXME I find this reasoning questionable. We may pass int2, and
+ * comparing it is probably a bit cheaper than comparing a bigint.
*/
if (width <= sizeof(Datum))
return 1.0;
/*
* We consider the cost of a comparison not to be directly proportional to
- * width of the argument, because widths of the arguments could be slightly
- * different (we only know the average width for the whole column). So we
- * use log16(width) as an estimate.
+ * width of the argument, because widths of the arguments could be
+ * slightly different (we only know the average width for the whole
+ * column). So we use log16(width) as an estimate.
*/
return 1.0 + 0.125 * LOG2(width / sizeof(Datum));
}
@@ -1902,23 +1902,23 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
bool heapSort)
{
Cost per_tuple_cost = 0.0;
- ListCell *lc;
- List *pathkeyExprs = NIL;
+ ListCell *lc;
+ List *pathkeyExprs = NIL;
double tuplesPerPrevGroup = tuples;
double totalFuncCost = 1.0;
bool has_fake_var = false;
int i = 0;
Oid prev_datatype = InvalidOid;
- List *cache_varinfos = NIL;
+ List *cache_varinfos = NIL;
/* fallback if pathkeys is unknown */
if (list_length(pathkeys) == 0)
{
/*
- * If we'll use a bounded heap-sort keeping just K tuples in memory, for
- * a total number of tuple comparisons of N log2 K; but the constant
- * factor is a bit higher than for quicksort. Tweak it so that the cost
- * curve is continuous at the crossover point.
+ * If we'll use a bounded heap-sort keeping just K tuples in memory,
+ * for a total number of tuple comparisons of N log2 K; but the
+ * constant factor is a bit higher than for quicksort. Tweak it so
+ * that the cost curve is continuous at the crossover point.
*/
output_tuples = (heapSort) ? 2.0 * output_tuples : tuples;
per_tuple_cost += 2.0 * cpu_operator_cost * LOG2(output_tuples);
@@ -1930,17 +1930,17 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
}
/*
- * Computing total cost of sorting takes into account:
- * - per column comparison function cost
- * - we try to compute needed number of comparison per column
+ * Computing total cost of sorting takes into account the per-column
+ * comparison function cost. We try to compute the needed number of
+ * comparisons per column.
*/
foreach(lc, pathkeys)
{
- PathKey *pathkey = (PathKey*) lfirst(lc);
- EquivalenceMember *em;
- double nGroups,
- correctedNGroups;
- Cost funcCost = 1.0;
+ PathKey *pathkey = (PathKey *) lfirst(lc);
+ EquivalenceMember *em;
+ double nGroups,
+ correctedNGroups;
+ Cost funcCost = 1.0;
/*
* We believe that equivalence members aren't very different, so, to
@@ -1985,10 +1985,10 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
pathkeyExprs = lappend(pathkeyExprs, em->em_expr);
/*
- * We need to calculate the number of comparisons for this column, which
- * requires knowing the group size. So we estimate the number of groups
- * by calling estimate_num_groups_incremental(), which estimates the
- * group size for "new" pathkeys.
+ * We need to calculate the number of comparisons for this column,
+ * which requires knowing the group size. So we estimate the number of
+ * groups by calling estimate_num_groups_incremental(), which
+ * estimates the group size for "new" pathkeys.
*
* Note: estimate_num_groups_incremental does not handle fake Vars, so
* use a default estimate otherwise.
@@ -1999,26 +1999,30 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
&cache_varinfos,
list_length(pathkeyExprs) - 1);
else if (tuples > 4.0)
+
/*
* Use geometric mean as estimation if there are no stats.
*
- * We don't use DEFAULT_NUM_DISTINCT here, because that’s used for
- * a single column, but here we’re dealing with multiple columns.
+ * We don't use DEFAULT_NUM_DISTINCT here, because that's used for
+ * a single column, but here we're dealing with multiple columns.
*/
nGroups = ceil(2.0 + sqrt(tuples) * (i + 1) / list_length(pathkeys));
else
nGroups = tuples;
/*
- * Presorted keys are not considered in the cost above, but we still do
- * have to compare them in the qsort comparator. So make sure to factor
- * in the cost in that case.
+ * Presorted keys are not considered in the cost above, but we still
+ * do have to compare them in the qsort comparator. So make sure to
+ * factor in the cost in that case.
*/
if (i >= nPresortedKeys)
{
if (heapSort)
{
- /* have to keep at least one group, and a multiple of group size */
+ /*
+ * have to keep at least one group, and a multiple of group
+ * size
+ */
correctedNGroups = ceil(output_tuples / tuplesPerPrevGroup);
}
else
@@ -2033,19 +2037,20 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
i++;
/*
- * Uniform distributions with all groups being of the same size are the
- * best case, with nice smooth behavior. Real-world distributions tend
- * not to be uniform, though, and we don’t have any reliable easy-to-use
- * information. As a basic defense against skewed distributions, we use
- * a 1.5 factor to make the expected group a bit larger, but we need to
- * be careful not to make the group larger than in the preceding step.
+ * Uniform distributions with all groups being of the same size are
+ * the best case, with nice smooth behavior. Real-world distributions
+ * tend not to be uniform, though, and we don't have any reliable
+ * easy-to-use information. As a basic defense against skewed
+ * distributions, we use a 1.5 factor to make the expected group a bit
+ * larger, but we need to be careful not to make the group larger than
+ * in the preceding step.
*/
tuplesPerPrevGroup = Min(tuplesPerPrevGroup,
ceil(1.5 * tuplesPerPrevGroup / nGroups));
/*
- * Once we get single-row group, it means tuples in the group are unique
- * and we can skip all remaining columns.
+ * Once we get a single-row group, the tuples in the group are unique
+ * and we can skip all remaining columns.
*/
if (tuplesPerPrevGroup <= 1.0)
break;
@@ -2057,15 +2062,15 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
per_tuple_cost *= cpu_operator_cost;
/*
- * Accordingly to "Introduction to algorithms", Thomas H. Cormen, Charles E.
- * Leiserson, Ronald L. Rivest, ISBN 0-07-013143-0, quicksort estimation
- * formula has additional term proportional to number of tuples (See Chapter
- * 8.2 and Theorem 4.1). That affects cases with a low number of tuples,
- * approximately less than 1e4. We could implement it as an additional
- * multiplier under the logarithm, but we use a bit more complex formula
- * which takes into account the number of unique tuples and it’s not clear
- * how to combine the multiplier with the number of groups. Estimate it as
- * 10 in cpu_operator_cost unit.
+ * According to "Introduction to Algorithms" (Thomas H. Cormen, Charles
+ * E. Leiserson, Ronald L. Rivest, ISBN 0-07-013143-0), the quicksort
+ * estimation formula has an additional term proportional to the number of
+ * tuples (see Chapter 8.2 and Theorem 4.1). That affects cases with a low
+ * number of tuples, approximately fewer than 1e4. We could implement it as
+ * an additional multiplier under the logarithm, but we use a somewhat more
+ * complex formula that takes the number of unique tuples into account, and
+ * it's not clear how to combine the multiplier with the number of groups.
+ * Estimate it as 10 cpu_operator_cost units.
*/
per_tuple_cost += 10 * cpu_operator_cost;
@@ -2082,7 +2087,7 @@ cost_sort_estimate(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
double tuples)
{
return compute_cpu_sort_cost(root, pathkeys, nPresortedKeys,
- 0, tuples, tuples, false);
+ 0, tuples, tuples, false);
}
/*
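The comments re-wrapped above all refer to concrete formulas in get_width_cost_multiplier() and compute_cpu_sort_cost(). They are easy to sanity-check in isolation; the sketch below evaluates just those expressions with made-up inputs and an assumed cpu_operator_cost of 0.0025, omitting the statistics lookups, caching and equivalence-member handling of the real functions:

#include <math.h>
#include <stdio.h>

#define LOG2(x) (log(x) / 0.693147180559945)

int
main(void)
{
    double  datum_size = 8.0;           /* sizeof(Datum) on 64-bit, assumed */
    double  width = 64.0;               /* average column width, made up */
    double  tuples = 1e6;               /* input tuples */
    double  output_tuples = 100.0;      /* K tuples kept by a bounded heap-sort */
    int     npathkeys = 3;
    double  cpu_operator_cost = 0.0025; /* assumed default */

    /* comparison cost grows as log16 of the width: 1 + 0.125 * log2(...) */
    printf("width multiplier: %.3f\n",
           1.0 + 0.125 * LOG2(width / datum_size));

    /* bounded heap-sort: 2.0 * cpu_operator_cost * log2(2K) per input tuple */
    printf("heap-sort per-tuple term: %.6f\n",
           2.0 * cpu_operator_cost * LOG2(2.0 * output_tuples));

    /* geometric-mean group estimate used when no statistics are available */
    for (int i = 0; i < npathkeys; i++)
        printf("nGroups[%d]: %.0f\n", i,
               ceil(2.0 + sqrt(tuples) * (i + 1) / npathkeys));

    /* damping of the expected group size with the 1.5 skew factor */
    double  nGroups = ceil(2.0 + sqrt(tuples) / npathkeys);
    double  tuplesPerPrevGroup = tuples;

    tuplesPerPrevGroup = fmin(tuplesPerPrevGroup,
                              ceil(1.5 * tuplesPerPrevGroup / nGroups));
    printf("tuplesPerPrevGroup after one key: %.0f\n", tuplesPerPrevGroup);

    /* constant term for the low-tuple-count quicksort correction */
    printf("low-N correction: %.6f\n", 10 * cpu_operator_cost);
    return 0;
}

The 1 + 0.125 * log2(width / sizeof(Datum)) multiplier is simply log16 of the width ratio, so doubling the column width adds only 0.125 to the per-comparison cost.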
diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c
index 34c5ab1cb60..60c0e3f1089 100644
--- a/src/backend/optimizer/path/equivclass.c
+++ b/src/backend/optimizer/path/equivclass.c
@@ -685,9 +685,9 @@ get_eclass_for_sort_expr(PlannerInfo *root,
/*
* Match!
*
- * Copy the sortref if it wasn't set yet. That may happen if the
- * ec was constructed from WHERE clause, i.e. it doesn't have a
- * target reference at all.
+ * Copy the sortref if it wasn't set yet. That may happen if
+ * the ec was constructed from WHERE clause, i.e. it doesn't
+ * have a target reference at all.
*/
if (cur_ec->ec_sortref == 0 && sortref > 0)
cur_ec->ec_sortref = sortref;
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index 9a8c5165b04..55206ec54d2 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -1258,7 +1258,7 @@ sort_inner_and_outer(PlannerInfo *root,
foreach(l, all_pathkeys)
{
- PathKey *front_pathkey = (PathKey *) lfirst(l);
+ PathKey *front_pathkey = (PathKey *) lfirst(l);
List *cur_mergeclauses;
List *outerkeys;
List *innerkeys;
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index 91556910aec..9775c4a7225 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -32,7 +32,7 @@
#include "utils/selfuncs.h"
/* Consider reordering of GROUP BY keys? */
-bool enable_group_by_reordering = true;
+bool enable_group_by_reordering = true;
static bool pathkey_is_redundant(PathKey *new_pathkey, List *pathkeys);
static bool matches_boolean_partition_clause(RestrictInfo *rinfo,
@@ -352,7 +352,7 @@ int
group_keys_reorder_by_pathkeys(List *pathkeys, List **group_pathkeys,
List **group_clauses)
{
- List *new_group_pathkeys= NIL,
+ List *new_group_pathkeys = NIL,
*new_group_clauses = NIL;
ListCell *lc;
int n;
@@ -365,16 +365,16 @@ group_keys_reorder_by_pathkeys(List *pathkeys, List **group_pathkeys,
* there's a matching GROUP BY key. If we find one, we append it to the
* list, and do the same for the clauses.
*
- * Once we find the first pathkey without a matching GROUP BY key, the rest
- * of the pathkeys are useless and can't be used to evaluate the grouping,
- * so we abort the loop and ignore the remaining pathkeys.
+ * Once we find the first pathkey without a matching GROUP BY key, the
+ * rest of the pathkeys are useless and can't be used to evaluate the
+ * grouping, so we abort the loop and ignore the remaining pathkeys.
*
* XXX Pathkeys are built in a way to allow simply comparing pointers.
*/
foreach(lc, pathkeys)
{
- PathKey *pathkey = (PathKey *) lfirst(lc);
- SortGroupClause *sgc;
+ PathKey *pathkey = (PathKey *) lfirst(lc);
+ SortGroupClause *sgc;
/* abort on first mismatch */
if (!list_member_ptr(*group_pathkeys, pathkey))
@@ -403,13 +403,14 @@ group_keys_reorder_by_pathkeys(List *pathkeys, List **group_pathkeys,
/*
* Used to generate all permutations of a pathkey list.
*/
-typedef struct PathkeyMutatorState {
+typedef struct PathkeyMutatorState
+{
List *elemsList;
ListCell **elemCells;
void **elems;
int *positions;
- int mutatorNColumns;
- int count;
+ int mutatorNColumns;
+ int count;
} PathkeyMutatorState;
@@ -428,9 +429,9 @@ typedef struct PathkeyMutatorState {
static void
PathkeyMutatorInit(PathkeyMutatorState *state, List *elems, int start, int end)
{
- int i;
+ int i;
int n = end - start;
- ListCell *lc;
+ ListCell *lc;
memset(state, 0, sizeof(*state));
@@ -438,8 +439,8 @@ PathkeyMutatorInit(PathkeyMutatorState *state, List *elems, int start, int end)
state->elemsList = list_copy(elems);
- state->elems = palloc(sizeof(void*) * n);
- state->elemCells = palloc(sizeof(ListCell*) * n);
+ state->elems = palloc(sizeof(void *) * n);
+ state->elemCells = palloc(sizeof(ListCell *) * n);
state->positions = palloc(sizeof(int) * n);
i = 0;
@@ -459,10 +460,10 @@ PathkeyMutatorInit(PathkeyMutatorState *state, List *elems, int start, int end)
static void
PathkeyMutatorSwap(int *a, int i, int j)
{
- int s = a[i];
+ int s = a[i];
- a[i] = a[j];
- a[j] = s;
+ a[i] = a[j];
+ a[j] = s;
}
/*
@@ -471,7 +472,10 @@ PathkeyMutatorSwap(int *a, int i, int j)
static bool
PathkeyMutatorNextSet(int *a, int n)
{
- int j, k, l, r;
+ int j,
+ k,
+ l,
+ r;
j = n - 2;
@@ -507,7 +511,7 @@ PathkeyMutatorNextSet(int *a, int n)
static List *
PathkeyMutatorNext(PathkeyMutatorState *state)
{
- int i;
+ int i;
state->count++;
@@ -528,9 +532,9 @@ PathkeyMutatorNext(PathkeyMutatorState *state)
}
/* update the list cells to point to the right elements */
- for(i = 0; i < state->mutatorNColumns; i++)
+ for (i = 0; i < state->mutatorNColumns; i++)
lfirst(state->elemCells[i]) =
- (void *) state->elems[ state->positions[i] - 1 ];
+ (void *) state->elems[state->positions[i] - 1];
return state->elemsList;
}
@@ -541,7 +545,7 @@ PathkeyMutatorNext(PathkeyMutatorState *state)
typedef struct PathkeySortCost
{
Cost cost;
- PathKey *pathkey;
+ PathKey *pathkey;
} PathkeySortCost;
static int
@@ -581,41 +585,42 @@ get_cheapest_group_keys_order(PlannerInfo *root, double nrows,
List **group_pathkeys, List **group_clauses,
int n_preordered)
{
- List *new_group_pathkeys = NIL,
- *new_group_clauses = NIL,
- *var_group_pathkeys;
+ List *new_group_pathkeys = NIL,
+ *new_group_clauses = NIL,
+ *var_group_pathkeys;
- ListCell *cell;
- PathkeyMutatorState mstate;
- double cheapest_sort_cost = -1.0;
+ ListCell *cell;
+ PathkeyMutatorState mstate;
+ double cheapest_sort_cost = -1.0;
- int nFreeKeys;
- int nToPermute;
+ int nFreeKeys;
+ int nToPermute;
/* If there are less than 2 unsorted pathkeys, we're done. */
if (list_length(*group_pathkeys) - n_preordered < 2)
return false;
/*
- * We could exhaustively cost all possible orderings of the pathkeys, but for
- * a large number of pathkeys it might be prohibitively expensive. So we try
- * to apply simple cheap heuristics first - we sort the pathkeys by sort cost
- * (as if the pathkey was sorted independently) and then check only the four
- * cheapest pathkeys. The remaining pathkeys are kept ordered by cost.
+ * We could exhaustively cost all possible orderings of the pathkeys, but
+ * for a large number of pathkeys it might be prohibitively expensive. So
+ * we try to apply simple cheap heuristics first - we sort the pathkeys by
+ * sort cost (as if the pathkey was sorted independently) and then check
+ * only the four cheapest pathkeys. The remaining pathkeys are kept
+ * ordered by cost.
*
* XXX This is a very simple heuristics, but likely to work fine for most
- * cases (because the number of GROUP BY clauses tends to be lower than 4).
- * But it ignores how the number of distinct values in each pathkey affects
- * the following steps. It might be better to use "more expensive" pathkey
- * first if it has many distinct values, because it then limits the number
- * of comparisons for the remaining pathkeys. But evaluating that is likely
- * quite the expensive.
+ * cases (because the number of GROUP BY clauses tends to be lower than
+ * 4). But it ignores how the number of distinct values in each pathkey
+ * affects the following steps. It might be better to use "more expensive"
+ * pathkey first if it has many distinct values, because it then limits
+ * the number of comparisons for the remaining pathkeys. But evaluating
+ * that is likely quite expensive.
*/
nFreeKeys = list_length(*group_pathkeys) - n_preordered;
nToPermute = 4;
if (nFreeKeys > nToPermute)
{
- int i;
+ int i;
PathkeySortCost *costs = palloc(sizeof(PathkeySortCost) * nFreeKeys);
/* skip the pre-ordered pathkeys */
@@ -624,7 +629,7 @@ get_cheapest_group_keys_order(PlannerInfo *root, double nrows,
/* estimate cost for sorting individual pathkeys */
for (i = 0; cell != NULL; i++, (cell = lnext(*group_pathkeys, cell)))
{
- List *to_cost = list_make1(lfirst(cell));
+ List *to_cost = list_make1(lfirst(cell));
Assert(i < nFreeKeys);
@@ -658,28 +663,29 @@ get_cheapest_group_keys_order(PlannerInfo *root, double nrows,
Assert(list_length(new_group_pathkeys) == list_length(*group_pathkeys));
/*
- * Generate pathkey lists with permutations of the first nToPermute pathkeys.
+ * Generate pathkey lists with permutations of the first nToPermute
+ * pathkeys.
*
* XXX We simply calculate sort cost for each individual pathkey list, but
- * there's room for two dynamic programming optimizations here. Firstly, we
- * may pass the current "best" cost to cost_sort_estimate so that it can
- * "abort" if the estimated pathkeys list exceeds it. Secondly, it could pass
- * the return information about the position when it exceeded the cost, and
- * we could skip all permutations with the same prefix.
+ * there's room for two dynamic programming optimizations here. Firstly,
+ * we may pass the current "best" cost to cost_sort_estimate so that it
+ * can "abort" if the estimated pathkeys list exceeds it. Secondly, it
+ * could return information about the position at which it exceeded the
+ * cost, and we could skip all permutations with the same prefix.
*
* Imagine we've already found ordering with cost C1, and we're evaluating
* another ordering - cost_sort_estimate() calculates cost by adding the
* pathkeys one by one (more or less), and the cost only grows. If at any
- * point it exceeds C1, it can't possibly be "better" so we can discard it.
- * But we also know that we can discard all ordering with the same prefix,
- * because if we're estimating (a,b,c,d) and we exceed C1 at (a,b) then the
- * same thing will happen for any ordering with this prefix.
+ * point it exceeds C1, it can't possibly be "better" so we can discard
+ * it. But we also know that we can discard all ordering with the same
+ * prefix, because if we're estimating (a,b,c,d) and we exceed C1 at (a,b)
+ * then the same thing will happen for any ordering with this prefix.
*/
PathkeyMutatorInit(&mstate, new_group_pathkeys, n_preordered, n_preordered + nToPermute);
- while((var_group_pathkeys = PathkeyMutatorNext(&mstate)) != NIL)
+ while ((var_group_pathkeys = PathkeyMutatorNext(&mstate)) != NIL)
{
- Cost cost;
+ Cost cost;
cost = cost_sort_estimate(root, var_group_pathkeys, n_preordered, nrows);
@@ -694,11 +700,11 @@ get_cheapest_group_keys_order(PlannerInfo *root, double nrows,
/* Reorder the group clauses according to the reordered pathkeys. */
foreach(cell, new_group_pathkeys)
{
- PathKey *pathkey = (PathKey *) lfirst(cell);
+ PathKey *pathkey = (PathKey *) lfirst(cell);
new_group_clauses = lappend(new_group_clauses,
- get_sortgroupref_clause(pathkey->pk_eclass->ec_sortref,
- *group_clauses));
+ get_sortgroupref_clause(pathkey->pk_eclass->ec_sortref,
+ *group_clauses));
}
/* Just append the rest GROUP BY clauses */
@@ -745,8 +751,8 @@ get_useful_group_keys_orderings(PlannerInfo *root, double nrows,
PathKeyInfo *info;
int n_preordered = 0;
- List *pathkeys = group_pathkeys;
- List *clauses = group_clauses;
+ List *pathkeys = group_pathkeys;
+ List *clauses = group_clauses;
/* always return at least the original pathkeys/clauses */
info = makeNode(PathKeyInfo);
@@ -756,9 +762,9 @@ get_useful_group_keys_orderings(PlannerInfo *root, double nrows,
infos = lappend(infos, info);
/*
- * Should we try generating alternative orderings of the group keys? If not,
- * we produce only the order specified in the query, i.e. the optimization
- * is effectively disabled.
+ * Should we try generating alternative orderings of the group keys? If
+ * not, we produce only the order specified in the query, i.e. the
+ * optimization is effectively disabled.
*/
if (!enable_group_by_reordering)
return infos;
@@ -782,8 +788,9 @@ get_useful_group_keys_orderings(PlannerInfo *root, double nrows,
}
/*
- * If the path is sorted in some way, try reordering the group keys to match
- * as much of the ordering as possible - we get this sort for free (mostly).
+ * If the path is sorted in some way, try reordering the group keys to
+ * match as much of the ordering as possible - we get this sort for free
+ * (mostly).
*
* We must not do this when there are no grouping sets, because those use
* more complex logic to decide the ordering.
@@ -2400,8 +2407,8 @@ pathkeys_useful_for_ordering(PlannerInfo *root, List *pathkeys)
static int
pathkeys_useful_for_grouping(PlannerInfo *root, List *pathkeys)
{
- ListCell *key;
- int n = 0;
+ ListCell *key;
+ int n = 0;
/* no special ordering requested for grouping */
if (root->group_pathkeys == NIL)
@@ -2414,7 +2421,7 @@ pathkeys_useful_for_grouping(PlannerInfo *root, List *pathkeys)
/* walk the pathkeys and search for matching group key */
foreach(key, pathkeys)
{
- PathKey *pathkey = (PathKey *) lfirst(key);
+ PathKey *pathkey = (PathKey *) lfirst(key);
/* no matching group key, we're done */
if (!list_member_ptr(root->group_pathkeys, pathkey))
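The reflowed comments in group_keys_reorder_by_pathkeys() describe a simple prefix match: walk the input ordering, pull each matching GROUP BY key to the front, and stop at the first pathkey with no match. A toy model of that loop, with integer ids standing in for PathKey pointers (this mirrors the described behaviour, not the planner's data structures):

#include <stdbool.h>
#include <stdio.h>

static bool
contains(const int *keys, int n, int key)
{
    for (int i = 0; i < n; i++)
        if (keys[i] == key)
            return true;
    return false;
}

/* returns how many leading pathkeys matched; reorders group_keys in place */
static int
reorder_by_pathkeys(const int *pathkeys, int npathkeys,
                    int *group_keys, int ngroup)
{
    int     n = 0;

    for (int i = 0; i < npathkeys && n < ngroup; i++)
    {
        if (!contains(group_keys, ngroup, pathkeys[i]))
            break;              /* remaining pathkeys are useless */

        /* swap the matched key into position n */
        for (int j = n; j < ngroup; j++)
        {
            if (group_keys[j] == pathkeys[i])
            {
                int     tmp = group_keys[n];

                group_keys[n] = group_keys[j];
                group_keys[j] = tmp;
                break;
            }
        }
        n++;
    }
    return n;
}

int
main(void)
{
    int     pathkeys[] = {3, 1, 7};     /* ordering provided by the input path */
    int     group_keys[] = {1, 2, 3};   /* GROUP BY keys, ids made up */
    int     n = reorder_by_pathkeys(pathkeys, 3, group_keys, 3);

    printf("pre-sorted keys: %d, new order:", n);
    for (int i = 0; i < 3; i++)
        printf(" %d", group_keys[i]);
    printf("\n");               /* prints: pre-sorted keys: 2, new order: 3 1 2 */
    return 0;
}

Only the matched prefix counts as pre-sorted; get_cheapest_group_keys_order() then permutes at most the four cheapest remaining keys, which is the heuristic the comments above justify.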
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index db11936efef..f4cc56039c2 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -1162,8 +1162,8 @@ mark_async_capable_plan(Plan *plan, Path *path)
case T_ProjectionPath:
/*
- * If the generated plan node includes a Result node for
- * the projection, we can't execute it asynchronously.
+ * If the generated plan node includes a Result node for the
+ * projection, we can't execute it asynchronously.
*/
if (IsA(plan, Result))
return false;
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 9a4accb4d9d..a0f2390334e 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -6250,7 +6250,7 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
Assert(list_length(pathkey_orderings) > 0);
/* process all potentially interesting grouping reorderings */
- foreach (lc2, pathkey_orderings)
+ foreach(lc2, pathkey_orderings)
{
bool is_sorted;
int presorted_keys = 0;
@@ -6283,8 +6283,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
else if (parse->hasAggs)
{
/*
- * We have aggregation, possibly with plain GROUP BY. Make
- * an AggPath.
+ * We have aggregation, possibly with plain GROUP BY.
+ * Make an AggPath.
*/
add_path(grouped_rel, (Path *)
create_agg_path(root,
@@ -6301,8 +6301,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
else if (group_clauses)
{
/*
- * We have GROUP BY without aggregation or grouping sets.
- * Make a GroupPath.
+ * We have GROUP BY without aggregation or grouping
+ * sets. Make a GroupPath.
*/
add_path(grouped_rel, (Path *)
create_group_path(root,
@@ -6321,8 +6321,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
/*
* Now we may consider incremental sort on this path, but only
- * when the path is not already sorted and when incremental sort
- * is enabled.
+ * when the path is not already sorted and when incremental
+ * sort is enabled.
*/
if (is_sorted || !enable_incremental_sort)
continue;
@@ -6335,8 +6335,9 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
continue;
/*
- * We should have already excluded pathkeys of length 1 because
- * then presorted_keys > 0 would imply is_sorted was true.
+ * We should have already excluded pathkeys of length 1
+ * because then presorted_keys > 0 would imply is_sorted was
+ * true.
*/
Assert(list_length(root->group_pathkeys) != 1);
@@ -6357,8 +6358,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
else if (parse->hasAggs)
{
/*
- * We have aggregation, possibly with plain GROUP BY. Make an
- * AggPath.
+ * We have aggregation, possibly with plain GROUP BY. Make
+ * an AggPath.
*/
add_path(grouped_rel, (Path *)
create_agg_path(root,
@@ -6375,8 +6376,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
else if (parse->groupClause)
{
/*
- * We have GROUP BY without aggregation or grouping sets. Make
- * a GroupPath.
+ * We have GROUP BY without aggregation or grouping sets.
+ * Make a GroupPath.
*/
add_path(grouped_rel, (Path *)
create_group_path(root,
@@ -6421,7 +6422,7 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
Assert(list_length(pathkey_orderings) > 0);
/* process all potentially interesting grouping reorderings */
- foreach (lc2, pathkey_orderings)
+ foreach(lc2, pathkey_orderings)
{
bool is_sorted;
int presorted_keys = 0;
@@ -6435,8 +6436,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
&presorted_keys);
/*
- * Insert a Sort node, if required. But there's no point in
- * sorting anything but the cheapest path.
+ * Insert a Sort node, if required. But there's no point
+ * in sorting anything but the cheapest path.
*/
if (!is_sorted)
{
@@ -6471,24 +6472,30 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
dNumGroups));
/*
- * Now we may consider incremental sort on this path, but only
- * when the path is not already sorted and when incremental
- * sort is enabled.
+ * Now we may consider incremental sort on this path, but
+ * only when the path is not already sorted and when
+ * incremental sort is enabled.
*/
if (is_sorted || !enable_incremental_sort)
continue;
- /* Restore the input path (we might have added Sort on top). */
+ /*
+ * Restore the input path (we might have added Sort on
+ * top).
+ */
path = path_original;
- /* no shared prefix, not point in building incremental sort */
+ /*
+ * no shared prefix, no point in building incremental
+ * sort
+ */
if (presorted_keys == 0)
continue;
/*
* We should have already excluded pathkeys of length 1
- * because then presorted_keys > 0 would imply is_sorted was
- * true.
+ * because then presorted_keys > 0 would imply is_sorted
+ * was true.
*/
Assert(list_length(root->group_pathkeys) != 1);
@@ -6741,7 +6748,7 @@ create_partial_grouping_paths(PlannerInfo *root,
Assert(list_length(pathkey_orderings) > 0);
/* process all potentially interesting grouping reorderings */
- foreach (lc2, pathkey_orderings)
+ foreach(lc2, pathkey_orderings)
{
bool is_sorted;
int presorted_keys = 0;
@@ -6874,7 +6881,7 @@ create_partial_grouping_paths(PlannerInfo *root,
Assert(list_length(pathkey_orderings) > 0);
/* process all potentially interesting grouping reorderings */
- foreach (lc2, pathkey_orderings)
+ foreach(lc2, pathkey_orderings)
{
bool is_sorted;
int presorted_keys = 0;
@@ -6924,8 +6931,8 @@ create_partial_grouping_paths(PlannerInfo *root,
/*
* Now we may consider incremental sort on this path, but only
- * when the path is not already sorted and when incremental sort
- * is enabled.
+ * when the path is not already sorted and when incremental
+ * sort is enabled.
*/
if (is_sorted || !enable_incremental_sort)
continue;
@@ -6938,8 +6945,9 @@ create_partial_grouping_paths(PlannerInfo *root,
continue;
/*
- * We should have already excluded pathkeys of length 1 because
- * then presorted_keys > 0 would imply is_sorted was true.
+ * We should have already excluded pathkeys of length 1
+ * because then presorted_keys > 0 would imply is_sorted was
+ * true.
*/
Assert(list_length(root->group_pathkeys) != 1);
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index e381ae512a2..533df86ff77 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -391,7 +391,7 @@ contain_mutable_functions_walker(Node *node, void *context)
const JsonConstructorExpr *ctor = (JsonConstructorExpr *) node;
ListCell *lc;
bool is_jsonb =
- ctor->returning->format->format_type == JS_FORMAT_JSONB;
+ ctor->returning->format->format_type == JS_FORMAT_JSONB;
/* Check argument_type => json[b] conversions */
foreach(lc, ctor->args)
@@ -899,7 +899,7 @@ max_parallel_hazard_walker(Node *node, max_parallel_hazard_context *context)
/* JsonExpr is parallel-unsafe if subtransactions can be used. */
else if (IsA(node, JsonExpr))
{
- JsonExpr *jsexpr = (JsonExpr *) node;
+ JsonExpr *jsexpr = (JsonExpr *) node;
if (ExecEvalJsonNeedsSubTransaction(jsexpr, NULL))
{
@@ -3581,7 +3581,7 @@ eval_const_expressions_mutator(Node *node,
context->case_val = raw;
formatted = eval_const_expressions_mutator((Node *) jve->formatted_expr,
- context);
+ context);
context->case_val = save_case_val;
@@ -5315,7 +5315,7 @@ pull_paramids_walker(Node *node, Bitmapset **context)
return false;
if (IsA(node, Param))
{
- Param *param = (Param *)node;
+ Param *param = (Param *) node;
*context = bms_add_member(*context, param->paramid);
return false;
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index df97b799174..5012bfe1425 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -968,102 +968,102 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
if (RELKIND_HAS_TABLE_AM(rel->rd_rel->relkind))
{
- table_relation_estimate_size(rel, attr_widths, pages, tuples,
- allvisfrac);
+ table_relation_estimate_size(rel, attr_widths, pages, tuples,
+ allvisfrac);
}
else if (rel->rd_rel->relkind == RELKIND_INDEX)
{
- /*
- * XXX: It'd probably be good to move this into a callback,
- * individual index types e.g. know if they have a metapage.
- */
+ /*
+ * XXX: It'd probably be good to move this into a callback, individual
+ * index types e.g. know if they have a metapage.
+ */
- /* it has storage, ok to call the smgr */
- curpages = RelationGetNumberOfBlocks(rel);
+ /* it has storage, ok to call the smgr */
+ curpages = RelationGetNumberOfBlocks(rel);
- /* report estimated # pages */
- *pages = curpages;
- /* quick exit if rel is clearly empty */
- if (curpages == 0)
- {
- *tuples = 0;
- *allvisfrac = 0;
- return;
- }
+ /* report estimated # pages */
+ *pages = curpages;
+ /* quick exit if rel is clearly empty */
+ if (curpages == 0)
+ {
+ *tuples = 0;
+ *allvisfrac = 0;
+ return;
+ }
- /* coerce values in pg_class to more desirable types */
- relpages = (BlockNumber) rel->rd_rel->relpages;
- reltuples = (double) rel->rd_rel->reltuples;
- relallvisible = (BlockNumber) rel->rd_rel->relallvisible;
+ /* coerce values in pg_class to more desirable types */
+ relpages = (BlockNumber) rel->rd_rel->relpages;
+ reltuples = (double) rel->rd_rel->reltuples;
+ relallvisible = (BlockNumber) rel->rd_rel->relallvisible;
+ /*
+ * Discount the metapage while estimating the number of tuples. This
+ * is a kluge because it assumes more than it ought to about index
+ * structure. Currently it's OK for btree, hash, and GIN indexes but
+ * suspect for GiST indexes.
+ */
+ if (relpages > 0)
+ {
+ curpages--;
+ relpages--;
+ }
+
+ /* estimate number of tuples from previous tuple density */
+ if (reltuples >= 0 && relpages > 0)
+ density = reltuples / (double) relpages;
+ else
+ {
/*
- * Discount the metapage while estimating the number of tuples.
- * This is a kluge because it assumes more than it ought to about
- * index structure. Currently it's OK for btree, hash, and GIN
- * indexes but suspect for GiST indexes.
+ * If we have no data because the relation was never vacuumed,
+ * estimate tuple width from attribute datatypes. We assume here
+ * that the pages are completely full, which is OK for tables
+ * (since they've presumably not been VACUUMed yet) but is
+ * probably an overestimate for indexes. Fortunately
+ * get_relation_info() can clamp the overestimate to the parent
+ * table's size.
+ *
+ * Note: this code intentionally disregards alignment
+ * considerations, because (a) that would be gilding the lily
+ * considering how crude the estimate is, and (b) it creates
+ * platform dependencies in the default plans which are kind of a
+ * headache for regression testing.
+ *
+ * XXX: Should this logic be more index specific?
*/
- if (relpages > 0)
- {
- curpages--;
- relpages--;
- }
-
- /* estimate number of tuples from previous tuple density */
- if (reltuples >= 0 && relpages > 0)
- density = reltuples / (double) relpages;
- else
- {
- /*
- * If we have no data because the relation was never vacuumed,
- * estimate tuple width from attribute datatypes. We assume
- * here that the pages are completely full, which is OK for
- * tables (since they've presumably not been VACUUMed yet) but
- * is probably an overestimate for indexes. Fortunately
- * get_relation_info() can clamp the overestimate to the
- * parent table's size.
- *
- * Note: this code intentionally disregards alignment
- * considerations, because (a) that would be gilding the lily
- * considering how crude the estimate is, and (b) it creates
- * platform dependencies in the default plans which are kind
- * of a headache for regression testing.
- *
- * XXX: Should this logic be more index specific?
- */
- int32 tuple_width;
+ int32 tuple_width;
- tuple_width = get_rel_data_width(rel, attr_widths);
- tuple_width += MAXALIGN(SizeofHeapTupleHeader);
- tuple_width += sizeof(ItemIdData);
- /* note: integer division is intentional here */
- density = (BLCKSZ - SizeOfPageHeaderData) / tuple_width;
- }
- *tuples = rint(density * (double) curpages);
+ tuple_width = get_rel_data_width(rel, attr_widths);
+ tuple_width += MAXALIGN(SizeofHeapTupleHeader);
+ tuple_width += sizeof(ItemIdData);
+ /* note: integer division is intentional here */
+ density = (BLCKSZ - SizeOfPageHeaderData) / tuple_width;
+ }
+ *tuples = rint(density * (double) curpages);
- /*
- * We use relallvisible as-is, rather than scaling it up like we
- * do for the pages and tuples counts, on the theory that any
- * pages added since the last VACUUM are most likely not marked
- * all-visible. But costsize.c wants it converted to a fraction.
- */
- if (relallvisible == 0 || curpages <= 0)
- *allvisfrac = 0;
- else if ((double) relallvisible >= curpages)
- *allvisfrac = 1;
- else
- *allvisfrac = (double) relallvisible / curpages;
+ /*
+ * We use relallvisible as-is, rather than scaling it up like we do
+ * for the pages and tuples counts, on the theory that any pages added
+ * since the last VACUUM are most likely not marked all-visible. But
+ * costsize.c wants it converted to a fraction.
+ */
+ if (relallvisible == 0 || curpages <= 0)
+ *allvisfrac = 0;
+ else if ((double) relallvisible >= curpages)
+ *allvisfrac = 1;
+ else
+ *allvisfrac = (double) relallvisible / curpages;
}
else
{
- /*
- * Just use whatever's in pg_class. This covers foreign tables,
- * sequences, and also relkinds without storage (shouldn't get
- * here?); see initializations in AddNewRelationTuple(). Note
- * that FDW must cope if reltuples is -1!
- */
- *pages = rel->rd_rel->relpages;
- *tuples = rel->rd_rel->reltuples;
- *allvisfrac = 0;
+ /*
+ * Just use whatever's in pg_class. This covers foreign tables,
+ * sequences, and also relkinds without storage (shouldn't get here?);
+ * see initializations in AddNewRelationTuple(). Note that FDW must
+ * cope if reltuples is -1!
+ */
+ *pages = rel->rd_rel->relpages;
+ *tuples = rel->rd_rel->reltuples;
+ *allvisfrac = 0;
}
}
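The re-indented index branch of estimate_rel_size() reduces to a little arithmetic once the metapage is discounted. The sketch below reproduces that math for an index with no reltuples statistics; the 8192-byte block, 24-byte page header, 24-byte aligned tuple header and 4-byte line pointer are typical values hard-coded as assumptions rather than taken from the PostgreSQL headers:

#include <math.h>
#include <stdio.h>

int
main(void)
{
    int     blcksz = 8192;          /* BLCKSZ, assumed */
    int     page_header = 24;       /* SizeOfPageHeaderData, assumed */
    int     tuple_width = 32;       /* average data width, made up */
    int     curpages = 1000;        /* RelationGetNumberOfBlocks() result */
    int     relallvisible = 0;      /* from pg_class */
    double  density;
    double  tuples;
    double  allvisfrac;

    /* discount the metapage (OK for btree/hash/GIN, suspect for GiST) */
    curpages--;

    tuple_width += 24;              /* MAXALIGN'd heap tuple header, assumed */
    tuple_width += 4;               /* line pointer (ItemIdData), assumed */

    /* integer division is intentional, as in the original code */
    density = (blcksz - page_header) / tuple_width;
    tuples = rint(density * (double) curpages);

    if (relallvisible == 0 || curpages <= 0)
        allvisfrac = 0;
    else if ((double) relallvisible >= curpages)
        allvisfrac = 1;
    else
        allvisfrac = (double) relallvisible / curpages;

    printf("pages=%d tuples=%.0f allvisfrac=%.2f\n",
           curpages, tuples, allvisfrac);
    return 0;
}

When reltuples and relpages are available, density is simply reltuples / relpages and the full-page assumption above is not used.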
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index 6b54e8e46df..1bcb875507d 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -104,8 +104,8 @@ static bool test_raw_expression_coverage(Node *node, void *context);
*/
Query *
parse_analyze_fixedparams(RawStmt *parseTree, const char *sourceText,
- const Oid *paramTypes, int numParams,
- QueryEnvironment *queryEnv)
+ const Oid *paramTypes, int numParams,
+ QueryEnvironment *queryEnv)
{
ParseState *pstate = make_parsestate(NULL);
Query *query;
@@ -2076,8 +2076,8 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt,
ListCell *ltl;
ListCell *rtl;
const char *context;
- bool recursive = (pstate->p_parent_cte &&
- pstate->p_parent_cte->cterecursive);
+ bool recursive = (pstate->p_parent_cte &&
+ pstate->p_parent_cte->cterecursive);
context = (stmt->op == SETOP_UNION ? "UNION" :
(stmt->op == SETOP_INTERSECT ? "INTERSECT" :
@@ -2231,7 +2231,10 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt,
setup_parser_errposition_callback(&pcbstate, pstate,
bestlocation);
- /* If it's a recursive union, we need to require hashing support. */
+ /*
+ * If it's a recursive union, we need to require hashing
+ * support.
+ */
op->groupClauses = lappend(op->groupClauses,
makeSortGroupClauseForSetOp(rescoltype, recursive));
diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c
index dafde68b207..e2baa9d852e 100644
--- a/src/backend/parser/parse_clause.c
+++ b/src/backend/parser/parse_clause.c
@@ -2004,7 +2004,7 @@ findTargetlistEntrySQL92(ParseState *pstate, Node *node, List **tlist,
}
if (IsA(node, A_Const))
{
- A_Const *aconst = castNode(A_Const, node);
+ A_Const *aconst = castNode(A_Const, node);
int targetlist_pos = 0;
int target_pos;
diff --git a/src/backend/parser/parse_collate.c b/src/backend/parser/parse_collate.c
index 45dacc6c4c5..e90af4c4771 100644
--- a/src/backend/parser/parse_collate.c
+++ b/src/backend/parser/parse_collate.c
@@ -692,8 +692,11 @@ assign_collations_walker(Node *node, assign_collations_context *context)
}
break;
case T_JsonExpr:
- /* Context item and PASSING arguments are already
- * marked with collations in parse_expr.c. */
+
+ /*
+ * Context item and PASSING arguments are already
+ * marked with collations in parse_expr.c.
+ */
break;
default:
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index c1f194cc5b0..17709c3416b 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -3277,7 +3277,7 @@ transformJsonValueExprExt(ParseState *pstate, JsonValueExpr *ve,
if (exprtype == JSONOID || exprtype == JSONBOID)
{
- format = JS_FORMAT_DEFAULT; /* do not format json[b] types */
+ format = JS_FORMAT_DEFAULT; /* do not format json[b] types */
ereport(WARNING,
(errmsg("FORMAT JSON has no effect for json and jsonb types"),
parser_errposition(pstate, ve->format->location)));
@@ -3316,7 +3316,7 @@ transformJsonValueExprExt(ParseState *pstate, JsonValueExpr *ve,
format = default_format;
}
else if (exprtype == JSONOID || exprtype == JSONBOID)
- format = JS_FORMAT_DEFAULT; /* do not format json[b] types */
+ format = JS_FORMAT_DEFAULT; /* do not format json[b] types */
else
format = default_format;
@@ -3364,13 +3364,13 @@ transformJsonValueExprExt(ParseState *pstate, JsonValueExpr *ve,
FuncExpr *fexpr;
Oid fnoid;
- if (cast_is_needed) /* only CAST is allowed */
+ if (cast_is_needed) /* only CAST is allowed */
ereport(ERROR,
(errcode(ERRCODE_CANNOT_COERCE),
errmsg("cannot cast type %s to %s",
format_type_be(exprtype),
format_type_be(targettype)),
- parser_errposition(pstate, location)));
+ parser_errposition(pstate, location)));
fnoid = targettype == JSONOID ? F_TO_JSON : F_TO_JSONB;
fexpr = makeFuncExpr(fnoid, targettype, list_make1(expr),
@@ -3444,7 +3444,7 @@ checkJsonOutputFormat(ParseState *pstate, const JsonFormat *format,
if (format->format_type == JS_FORMAT_JSON)
{
JsonEncoding enc = format->encoding != JS_ENC_DEFAULT ?
- format->encoding : JS_ENC_UTF8;
+ format->encoding : JS_ENC_UTF8;
if (targettype != BYTEAOID &&
format->encoding != JS_ENC_DEFAULT)
@@ -3583,6 +3583,7 @@ coerceJsonFuncExpr(ParseState *pstate, Node *expr,
list_make2(texpr, enc),
InvalidOid, InvalidOid,
COERCE_EXPLICIT_CALL);
+
fexpr->location = location;
return (Node *) fexpr;
@@ -3591,7 +3592,7 @@ coerceJsonFuncExpr(ParseState *pstate, Node *expr,
/* try to coerce expression to the output type */
res = coerce_to_target_type(pstate, expr, exprtype,
returning->typid, returning->typmod,
- /* XXX throwing errors when casting to char(N) */
+ /* XXX throwing errors when casting to char(N) */
COERCION_EXPLICIT,
COERCE_EXPLICIT_CAST,
location);
@@ -3616,7 +3617,7 @@ makeJsonConstructorExpr(ParseState *pstate, JsonConstructorType type,
Node *placeholder;
Node *coercion;
Oid intermediate_typid =
- returning->format->format_type == JS_FORMAT_JSONB ? JSONBOID : JSONOID;
+ returning->format->format_type == JS_FORMAT_JSONB ? JSONBOID : JSONOID;
jsctor->args = args;
jsctor->func = fexpr;
@@ -3694,7 +3695,7 @@ static Node *
transformJsonArrayQueryConstructor(ParseState *pstate,
JsonArrayQueryConstructor *ctor)
{
- SubLink *sublink = makeNode(SubLink);
+ SubLink *sublink = makeNode(SubLink);
SelectStmt *select = makeNode(SelectStmt);
RangeSubselect *range = makeNode(RangeSubselect);
Alias *alias = makeNode(Alias);
@@ -3766,8 +3767,8 @@ transformJsonAggConstructor(ParseState *pstate, JsonAggConstructor *agg_ctor,
Oid aggfnoid;
Node *node;
Expr *aggfilter = agg_ctor->agg_filter ? (Expr *)
- transformWhereClause(pstate, agg_ctor->agg_filter,
- EXPR_KIND_FILTER, "FILTER") : NULL;
+ transformWhereClause(pstate, agg_ctor->agg_filter,
+ EXPR_KIND_FILTER, "FILTER") : NULL;
aggfnoid = DatumGetInt32(DirectFunctionCall1(regprocin,
CStringGetDatum(aggfn)));
@@ -3809,7 +3810,7 @@ transformJsonAggConstructor(ParseState *pstate, JsonAggConstructor *agg_ctor,
aggref->aggtype = aggtype;
/* aggcollid and inputcollid will be set by parse_collate.c */
- aggref->aggtranstype = InvalidOid; /* will be set by planner */
+ aggref->aggtranstype = InvalidOid; /* will be set by planner */
/* aggargtypes will be set by transformAggregateCall */
/* aggdirectargs and args will be set by transformAggregateCall */
/* aggorder and aggdistinct will be set by transformAggregateCall */
@@ -3818,7 +3819,7 @@ transformJsonAggConstructor(ParseState *pstate, JsonAggConstructor *agg_ctor,
aggref->aggvariadic = false;
aggref->aggkind = AGGKIND_NORMAL;
/* agglevelsup will be set by transformAggregateCall */
- aggref->aggsplit = AGGSPLIT_SIMPLE; /* planner might change this */
+ aggref->aggsplit = AGGSPLIT_SIMPLE; /* planner might change this */
aggref->location = agg_ctor->location;
transformAggregateCall(pstate, aggref, args, agg_ctor->agg_order, false);
@@ -3860,14 +3861,13 @@ transformJsonObjectAgg(ParseState *pstate, JsonObjectAgg *agg)
{
if (agg->absent_on_null)
if (agg->unique)
- aggfnname = "pg_catalog.jsonb_object_agg_unique_strict"; /* F_JSONB_OBJECT_AGG_UNIQUE_STRICT */
+ aggfnname = "pg_catalog.jsonb_object_agg_unique_strict"; /* F_JSONB_OBJECT_AGG_UNIQUE_STRICT */
else
- aggfnname = "pg_catalog.jsonb_object_agg_strict"; /* F_JSONB_OBJECT_AGG_STRICT */
+ aggfnname = "pg_catalog.jsonb_object_agg_strict"; /* F_JSONB_OBJECT_AGG_STRICT */
+ else if (agg->unique)
+ aggfnname = "pg_catalog.jsonb_object_agg_unique"; /* F_JSONB_OBJECT_AGG_UNIQUE */
else
- if (agg->unique)
- aggfnname = "pg_catalog.jsonb_object_agg_unique"; /* F_JSONB_OBJECT_AGG_UNIQUE */
- else
- aggfnname = "pg_catalog.jsonb_object_agg"; /* F_JSONB_OBJECT_AGG */
+ aggfnname = "pg_catalog.jsonb_object_agg"; /* F_JSONB_OBJECT_AGG */
aggtype = JSONBOID;
}
@@ -3877,12 +3877,11 @@ transformJsonObjectAgg(ParseState *pstate, JsonObjectAgg *agg)
if (agg->unique)
aggfnname = "pg_catalog.json_object_agg_unique_strict"; /* F_JSON_OBJECT_AGG_UNIQUE_STRICT */
else
- aggfnname = "pg_catalog.json_object_agg_strict"; /* F_JSON_OBJECT_AGG_STRICT */
+ aggfnname = "pg_catalog.json_object_agg_strict"; /* F_JSON_OBJECT_AGG_STRICT */
+ else if (agg->unique)
+ aggfnname = "pg_catalog.json_object_agg_unique"; /* F_JSON_OBJECT_AGG_UNIQUE */
else
- if (agg->unique)
- aggfnname = "pg_catalog.json_object_agg_unique"; /* F_JSON_OBJECT_AGG_UNIQUE */
- else
- aggfnname = "pg_catalog.json_object_agg"; /* F_JSON_OBJECT_AGG */
+ aggfnname = "pg_catalog.json_object_agg"; /* F_JSON_OBJECT_AGG */
aggtype = JSONOID;
}
@@ -4209,7 +4208,7 @@ coerceJsonExpr(ParseState *pstate, Node *expr, const JsonReturning *returning)
* Transform a JSON output clause of JSON_VALUE and JSON_QUERY.
*/
static void
-transformJsonFuncExprOutput(ParseState *pstate, JsonFuncExpr *func,
+transformJsonFuncExprOutput(ParseState *pstate, JsonFuncExpr *func,
JsonExpr *jsexpr)
{
Node *expr = jsexpr->formatted_expr;
@@ -4333,19 +4332,19 @@ initJsonItemCoercions(ParseState *pstate, JsonItemCoercions *coercions,
Oid typid;
} *p,
coercionTypids[] =
- {
- { &coercions->null, UNKNOWNOID },
- { &coercions->string, TEXTOID },
- { &coercions->numeric, NUMERICOID },
- { &coercions->boolean, BOOLOID },
- { &coercions->date, DATEOID },
- { &coercions->time, TIMEOID },
- { &coercions->timetz, TIMETZOID },
- { &coercions->timestamp, TIMESTAMPOID },
- { &coercions->timestamptz, TIMESTAMPTZOID },
- { &coercions->composite, contextItemTypeId },
- { NULL, InvalidOid }
- };
+ {
+ {&coercions->null, UNKNOWNOID},
+ {&coercions->string, TEXTOID},
+ {&coercions->numeric, NUMERICOID},
+ {&coercions->boolean, BOOLOID},
+ {&coercions->date, DATEOID},
+ {&coercions->time, TIMEOID},
+ {&coercions->timetz, TIMETZOID},
+ {&coercions->timestamp, TIMESTAMPOID},
+ {&coercions->timestamptz, TIMESTAMPTZOID},
+ {&coercions->composite, contextItemTypeId},
+ {NULL, InvalidOid}
+ };
for (p = coercionTypids; p->coercion; p++)
*p->coercion = initJsonItemCoercion(pstate, p->typid, returning);
@@ -4512,7 +4511,7 @@ static Node *
transformJsonParseExpr(ParseState *pstate, JsonParseExpr *jsexpr)
{
JsonReturning *returning = transformJsonConstructorRet(pstate, jsexpr->output,
- "JSON()");
+ "JSON()");
Node *arg;
if (jsexpr->unique_keys)
@@ -4544,8 +4543,8 @@ transformJsonParseExpr(ParseState *pstate, JsonParseExpr *jsexpr)
}
return makeJsonConstructorExpr(pstate, JSCTOR_JSON_PARSE, list_make1(arg), NULL,
- returning, jsexpr->unique_keys, false,
- jsexpr->location);
+ returning, jsexpr->unique_keys, false,
+ jsexpr->location);
}
/*
@@ -4556,13 +4555,13 @@ transformJsonScalarExpr(ParseState *pstate, JsonScalarExpr *jsexpr)
{
Node *arg = transformExprRecurse(pstate, (Node *) jsexpr->expr);
JsonReturning *returning = transformJsonConstructorRet(pstate, jsexpr->output,
- "JSON_SCALAR()");
+ "JSON_SCALAR()");
if (exprType(arg) == UNKNOWNOID)
arg = coerce_to_specific_type(pstate, arg, TEXTOID, "JSON_SCALAR");
return makeJsonConstructorExpr(pstate, JSCTOR_JSON_SCALAR, list_make1(arg), NULL,
- returning, false, false, jsexpr->location);
+ returning, false, false, jsexpr->location);
}
/*
@@ -4586,5 +4585,5 @@ transformJsonSerializeExpr(ParseState *pstate, JsonSerializeExpr *expr)
}
return makeJsonConstructorExpr(pstate, JSCTOR_JSON_SERIALIZE, list_make1(arg),
- NULL, returning, false, false, expr->location);
+ NULL, returning, false, false, expr->location);
}
diff --git a/src/backend/parser/parse_jsontable.c b/src/backend/parser/parse_jsontable.c
index 5ee63cf57f0..dbd3e66205d 100644
--- a/src/backend/parser/parse_jsontable.c
+++ b/src/backend/parser/parse_jsontable.c
@@ -33,31 +33,31 @@
/* Context for JSON_TABLE transformation */
typedef struct JsonTableContext
{
- ParseState *pstate; /* parsing state */
- JsonTable *table; /* untransformed node */
- TableFunc *tablefunc; /* transformed node */
- List *pathNames; /* list of all path and columns names */
- int pathNameId; /* path name id counter */
+ ParseState *pstate; /* parsing state */
+ JsonTable *table; /* untransformed node */
+ TableFunc *tablefunc; /* transformed node */
+ List *pathNames; /* list of all path and columns names */
+ int pathNameId; /* path name id counter */
Oid contextItemTypid; /* type oid of context item (json/jsonb) */
} JsonTableContext;
-static JsonTableParent * transformJsonTableColumns(JsonTableContext *cxt,
- JsonTablePlan *plan,
- List *columns,
- char *pathSpec,
- char **pathName,
- int location);
+static JsonTableParent *transformJsonTableColumns(JsonTableContext *cxt,
+ JsonTablePlan *plan,
+ List *columns,
+ char *pathSpec,
+ char **pathName,
+ int location);
static Node *
makeStringConst(char *str, int location)
{
- A_Const *n = makeNode(A_Const);
+ A_Const *n = makeNode(A_Const);
n->val.node.type = T_String;
n->val.sval.sval = str;
n->location = location;
- return (Node *)n;
+ return (Node *) n;
}
/*
@@ -122,7 +122,7 @@ transformJsonTableColumn(JsonTableColumn *jtc, Node *contextItemExpr,
static bool
isJsonTablePathNameDuplicate(JsonTableContext *cxt, const char *pathname)
{
- ListCell *lc;
+ ListCell *lc;
foreach(lc, cxt->pathNames)
{
@@ -342,7 +342,7 @@ transformJsonTableChildPlan(JsonTableContext *cxt, JsonTablePlan *plan,
foreach(lc, columns)
{
JsonTableColumn *jtc = castNode(JsonTableColumn, lfirst(lc));
- Node *node;
+ Node *node;
if (jtc->coltype != JTC_NESTED)
continue;
@@ -369,10 +369,10 @@ transformJsonTableChildPlan(JsonTableContext *cxt, JsonTablePlan *plan,
}
else
{
- Node *node1 =
- transformJsonTableChildPlan(cxt, plan->plan1, columns);
- Node *node2 =
- transformJsonTableChildPlan(cxt, plan->plan2, columns);
+ Node *node1 = transformJsonTableChildPlan(cxt, plan->plan1,
+ columns);
+ Node *node2 = transformJsonTableChildPlan(cxt, plan->plan2,
+ columns);
return makeJsonTableSiblingJoin(plan->join_type == JSTPJ_CROSS,
node1, node2);
@@ -396,7 +396,7 @@ transformJsonTableChildPlan(JsonTableContext *cxt, JsonTablePlan *plan,
static bool
typeIsComposite(Oid typid)
{
- char typtype;
+ char typtype;
if (typid == JSONOID ||
typid == JSONBOID ||
@@ -406,7 +406,7 @@ typeIsComposite(Oid typid)
typtype = get_typtype(typid);
- if (typtype == TYPTYPE_COMPOSITE)
+ if (typtype == TYPTYPE_COMPOSITE)
return true;
if (typtype == TYPTYPE_DOMAIN)
@@ -424,7 +424,7 @@ appendJsonTableColumns(JsonTableContext *cxt, List *columns)
JsonTable *jt = cxt->table;
TableFunc *tf = cxt->tablefunc;
bool errorOnError = jt->on_error &&
- jt->on_error->btype == JSON_BEHAVIOR_ERROR;
+ jt->on_error->btype == JSON_BEHAVIOR_ERROR;
foreach(col, columns)
{
@@ -436,24 +436,23 @@ appendJsonTableColumns(JsonTableContext *cxt, List *columns)
if (rawc->name)
{
/* make sure column names are unique */
- ListCell *colname;
+ ListCell *colname;
foreach(colname, tf->colnames)
if (!strcmp((const char *) colname, rawc->name))
- ereport(ERROR,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("column name \"%s\" is not unique",
- rawc->name),
- parser_errposition(pstate, rawc->location)));
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("column name \"%s\" is not unique",
+ rawc->name),
+ parser_errposition(pstate, rawc->location)));
tf->colnames = lappend(tf->colnames,
makeString(pstrdup(rawc->name)));
}
/*
- * Determine the type and typmod for the new column. FOR
- * ORDINALITY columns are INTEGER by standard; the others are
- * user-specified.
+ * Determine the type and typmod for the new column. FOR ORDINALITY
+ * columns are INTEGER by standard; the others are user-specified.
*/
switch (rawc->coltype)
{
@@ -517,8 +516,8 @@ appendJsonTableColumns(JsonTableContext *cxt, List *columns)
tf->coltypmods = lappend_int(tf->coltypmods, typmod);
tf->colcollations = lappend_oid(tf->colcollations,
type_is_collatable(typid)
- ? DEFAULT_COLLATION_OID
- : InvalidOid);
+ ? DEFAULT_COLLATION_OID
+ : InvalidOid);
tf->colvalexprs = lappend(tf->colvalexprs, colexpr);
}
}
@@ -571,7 +570,7 @@ transformJsonTableColumns(JsonTableContext *cxt, JsonTablePlan *plan,
errdetail("JSON_TABLE columns must contain "
"explicit AS pathname specification if "
"explicit PLAN clause is used"),
- parser_errposition(cxt->pstate, location)));
+ parser_errposition(cxt->pstate, location)));
*pathName = generateJsonTablePathName(cxt);
}
@@ -662,14 +661,15 @@ transformJsonTable(ParseState *pstate, JsonTable *jt)
registerAllJsonTableColumns(&cxt, jt->columns);
-#if 0 /* XXX it' unclear from the standard whether root path name is mandatory or not */
+#if 0 /* XXX it's unclear from the standard whether
+ * root path name is mandatory or not */
if (plan && plan->plan_type != JSTP_DEFAULT && !rootPathName)
{
/* Assign root path name and create corresponding plan node */
JsonTablePlan *rootNode = makeNode(JsonTablePlan);
JsonTablePlan *rootPlan = (JsonTablePlan *)
- makeJsonTableJoinedPlan(JSTPJ_OUTER, (Node *) rootNode,
- (Node *) plan, jt->location);
+ makeJsonTableJoinedPlan(JSTPJ_OUTER, (Node *) rootNode,
+ (Node *) plan, jt->location);
rootPathName = generateJsonTablePathName(&cxt);
diff --git a/src/backend/parser/parse_node.c b/src/backend/parser/parse_node.c
index a49c985d36e..4d39cf95945 100644
--- a/src/backend/parser/parse_node.c
+++ b/src/backend/parser/parse_node.c
@@ -382,55 +382,56 @@ make_const(ParseState *pstate, A_Const *aconst)
break;
case T_Float:
- {
- /* could be an oversize integer as well as a float ... */
-
- int64 val64;
- char *endptr;
-
- errno = 0;
- val64 = strtoi64(aconst->val.fval.fval, &endptr, 10);
- if (errno == 0 && *endptr == '\0')
{
- /*
- * It might actually fit in int32. Probably only INT_MIN can
- * occur, but we'll code the test generally just to be sure.
- */
- int32 val32 = (int32) val64;
+ /* could be an oversize integer as well as a float ... */
- if (val64 == (int64) val32)
- {
- val = Int32GetDatum(val32);
+ int64 val64;
+ char *endptr;
- typeid = INT4OID;
- typelen = sizeof(int32);
- typebyval = true;
+ errno = 0;
+ val64 = strtoi64(aconst->val.fval.fval, &endptr, 10);
+ if (errno == 0 && *endptr == '\0')
+ {
+ /*
+ * It might actually fit in int32. Probably only INT_MIN
+ * can occur, but we'll code the test generally just to be
+ * sure.
+ */
+ int32 val32 = (int32) val64;
+
+ if (val64 == (int64) val32)
+ {
+ val = Int32GetDatum(val32);
+
+ typeid = INT4OID;
+ typelen = sizeof(int32);
+ typebyval = true;
+ }
+ else
+ {
+ val = Int64GetDatum(val64);
+
+ typeid = INT8OID;
+ typelen = sizeof(int64);
+ typebyval = FLOAT8PASSBYVAL; /* int8 and float8 alike */
+ }
}
else
{
- val = Int64GetDatum(val64);
-
- typeid = INT8OID;
- typelen = sizeof(int64);
- typebyval = FLOAT8PASSBYVAL; /* int8 and float8 alike */
+ /* arrange to report location if numeric_in() fails */
+ setup_parser_errposition_callback(&pcbstate, pstate, aconst->location);
+ val = DirectFunctionCall3(numeric_in,
+ CStringGetDatum(aconst->val.fval.fval),
+ ObjectIdGetDatum(InvalidOid),
+ Int32GetDatum(-1));
+ cancel_parser_errposition_callback(&pcbstate);
+
+ typeid = NUMERICOID;
+ typelen = -1; /* variable len */
+ typebyval = false;
}
+ break;
}
- else
- {
- /* arrange to report location if numeric_in() fails */
- setup_parser_errposition_callback(&pcbstate, pstate, aconst->location);
- val = DirectFunctionCall3(numeric_in,
- CStringGetDatum(aconst->val.fval.fval),
- ObjectIdGetDatum(InvalidOid),
- Int32GetDatum(-1));
- cancel_parser_errposition_callback(&pcbstate);
-
- typeid = NUMERICOID;
- typelen = -1; /* variable len */
- typebyval = false;
- }
- break;
- }
case T_Boolean:
val = BoolGetDatum(boolVal(&aconst->val));
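
The reflowed block above is the integer-vs-numeric classification for oversize "float" literals: try the literal as a 64-bit integer, narrow to 32 bits when the round trip is lossless, and fall back to numeric otherwise. A minimal standalone C sketch of that decision (illustrative only; strtoll stands in for strtoi64):

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>
    #include <stdint.h>

    int main(void)
    {
        const char *literal = "2147483648";   /* one past INT32_MAX */
        char       *endptr;
        long long   val64;

        errno = 0;
        val64 = strtoll(literal, &endptr, 10);
        if (errno == 0 && *endptr == '\0')
        {
            int32_t val32 = (int32_t) val64;

            if (val64 == (long long) val32)
                printf("INT4 constant: %d\n", (int) val32);
            else
                printf("INT8 constant: %lld\n", val64);
        }
        else
            printf("NUMERIC constant: %s\n", literal);
        return 0;
    }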
diff --git a/src/backend/parser/parse_param.c b/src/backend/parser/parse_param.c
index 31a43e034c6..f668abfcb33 100644
--- a/src/backend/parser/parse_param.c
+++ b/src/backend/parser/parse_param.c
@@ -65,7 +65,7 @@ static bool query_contains_extern_params_walker(Node *node, void *context);
*/
void
setup_parse_fixed_parameters(ParseState *pstate,
- const Oid *paramTypes, int numParams)
+ const Oid *paramTypes, int numParams)
{
FixedParamState *parstate = palloc(sizeof(FixedParamState));
@@ -81,7 +81,7 @@ setup_parse_fixed_parameters(ParseState *pstate,
*/
void
setup_parse_variable_parameters(ParseState *pstate,
- Oid **paramTypes, int *numParams)
+ Oid **paramTypes, int *numParams)
{
VarParamState *parstate = palloc(sizeof(VarParamState));
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index 5448cb01fa7..00469763e88 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -1990,7 +1990,7 @@ addRangeTableEntryForTableFunc(ParseState *pstate,
{
RangeTblEntry *rte = makeNode(RangeTblEntry);
char *refname = alias ? alias->aliasname :
- pstrdup(tf->functype == TFT_XMLTABLE ? "xmltable" : "json_table");
+ pstrdup(tf->functype == TFT_XMLTABLE ? "xmltable" : "json_table");
Alias *eref;
int numaliases;
diff --git a/src/backend/partitioning/partdesc.c b/src/backend/partitioning/partdesc.c
index df2dcbfb99e..8b6e0bd5953 100644
--- a/src/backend/partitioning/partdesc.c
+++ b/src/backend/partitioning/partdesc.c
@@ -91,8 +91,8 @@ RelationGetPartitionDesc(Relation rel, bool omit_detached)
* cached descriptor too. We determine that based on the pg_inherits.xmin
* that was saved alongside that descriptor: if the xmin that was not in
* progress for that active snapshot is also not in progress for the
- * current active snapshot, then we can use it. Otherwise build one
- * from scratch.
+ * current active snapshot, then we can use it. Otherwise build one from
+ * scratch.
*/
if (omit_detached &&
rel->rd_partdesc_nodetached &&
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index f36c40e852f..2e146aac93b 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -984,7 +984,8 @@ rebuild_database_list(Oid newdb)
hctl.keysize = sizeof(Oid);
hctl.entrysize = sizeof(avl_dbase);
hctl.hcxt = tmpcxt;
- dbhash = hash_create("autovacuum db hash", 20, &hctl, /* magic number here FIXME */
+ dbhash = hash_create("autovacuum db hash", 20, &hctl, /* magic number here
+ * FIXME */
HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
/* start by inserting the new database */
@@ -1683,12 +1684,12 @@ AutoVacWorkerMain(int argc, char *argv[])
char dbname[NAMEDATALEN];
/*
- * Report autovac startup to the cumulative stats system. We deliberately do
- * this before InitPostgres, so that the last_autovac_time will get
- * updated even if the connection attempt fails. This is to prevent
- * autovac from getting "stuck" repeatedly selecting an unopenable
- * database, rather than making any progress on stuff it can connect
- * to.
+ * Report autovac startup to the cumulative stats system. We
+ * deliberately do this before InitPostgres, so that the
+ * last_autovac_time will get updated even if the connection attempt
+ * fails. This is to prevent autovac from getting "stuck" repeatedly
+ * selecting an unopenable database, rather than making any progress
+ * on stuff it can connect to.
*/
pgstat_report_autovac(dbid);
diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c
index 30682b63b3f..40601aefd97 100644
--- a/src/backend/postmaster/bgworker.c
+++ b/src/backend/postmaster/bgworker.c
@@ -826,9 +826,9 @@ StartBackgroundWorker(void)
/*
* Create a per-backend PGPROC struct in shared memory, except in the
- * EXEC_BACKEND case where this was done in SubPostmasterMain. We must
- * do this before we can use LWLocks (and in the EXEC_BACKEND case we
- * already had to do some stuff with LWLocks).
+ * EXEC_BACKEND case where this was done in SubPostmasterMain. We must do
+ * this before we can use LWLocks (and in the EXEC_BACKEND case we already
+ * had to do some stuff with LWLocks).
*/
#ifndef EXEC_BACKEND
InitProcess();
diff --git a/src/backend/postmaster/pgarch.c b/src/backend/postmaster/pgarch.c
index 8beff4a53cd..25e31c42e16 100644
--- a/src/backend/postmaster/pgarch.c
+++ b/src/backend/postmaster/pgarch.c
@@ -81,15 +81,14 @@ typedef struct PgArchData
int pgprocno; /* pgprocno of archiver process */
/*
- * Forces a directory scan in pgarch_readyXlog(). Protected by
- * arch_lck.
+ * Forces a directory scan in pgarch_readyXlog(). Protected by arch_lck.
*/
bool force_dir_scan;
slock_t arch_lck;
} PgArchData;
-char *XLogArchiveLibrary = "";
+char *XLogArchiveLibrary = "";
/* ----------
@@ -143,7 +142,7 @@ static bool pgarch_readyXlog(char *xlog);
static void pgarch_archiveDone(char *xlog);
static void pgarch_die(int code, Datum arg);
static void HandlePgArchInterrupts(void);
-static int ready_file_comparator(Datum a, Datum b, void *arg);
+static int ready_file_comparator(Datum a, Datum b, void *arg);
static void LoadArchiveLibrary(void);
static void call_archive_module_shutdown_callback(int code, Datum arg);
@@ -579,13 +578,13 @@ pgarch_readyXlog(char *xlog)
/*
* If we still have stored file names from the previous directory scan,
- * try to return one of those. We check to make sure the status file
- * is still present, as the archive_command for a previous file may
- * have already marked it done.
+ * try to return one of those. We check to make sure the status file is
+ * still present, as the archive_command for a previous file may have
+ * already marked it done.
*/
while (arch_files->arch_files_size > 0)
{
- struct stat st;
+ struct stat st;
char status_file[MAXPGPATH];
char *arch_file;
@@ -655,8 +654,8 @@ pgarch_readyXlog(char *xlog)
CStringGetDatum(basename), NULL) > 0)
{
/*
- * Remove the lowest priority file and add the current one to
- * the heap.
+ * Remove the lowest priority file and add the current one to the
+ * heap.
*/
arch_file = DatumGetCString(binaryheap_remove_first(arch_files->arch_heap));
strcpy(arch_file, basename);
@@ -677,8 +676,8 @@ pgarch_readyXlog(char *xlog)
binaryheap_build(arch_files->arch_heap);
/*
- * Fill arch_files array with the files to archive in ascending order
- * of priority.
+ * Fill arch_files array with the files to archive in ascending order of
+ * priority.
*/
arch_files->arch_files_size = arch_files->arch_heap->bh_size;
for (int i = 0; i < arch_files->arch_files_size; i++)
@@ -702,10 +701,10 @@ pgarch_readyXlog(char *xlog)
static int
ready_file_comparator(Datum a, Datum b, void *arg)
{
- char *a_str = DatumGetCString(a);
- char *b_str = DatumGetCString(b);
- bool a_history = IsTLHistoryFileName(a_str);
- bool b_history = IsTLHistoryFileName(b_str);
+ char *a_str = DatumGetCString(a);
+ char *b_str = DatumGetCString(b);
+ bool a_history = IsTLHistoryFileName(a_str);
+ bool b_history = IsTLHistoryFileName(b_str);
/* Timeline history files always have the highest priority. */
if (a_history != b_history)
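
For reference, the comparator re-indented here orders .ready files so that timeline history files sort ahead of everything else, with plain strcmp() as the tie-breaker. A rough standalone approximation (IsTLHistoryFileName() is replaced by a suffix check, and the sign convention is assumed, not taken from the source):

    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>

    static bool is_history_file(const char *name)
    {
        size_t len = strlen(name);
        return len > 8 && strcmp(name + len - 8, ".history") == 0;
    }

    static int ready_file_cmp(const char *a, const char *b)
    {
        bool a_history = is_history_file(a);
        bool b_history = is_history_file(b);

        if (a_history != b_history)
            return a_history ? -1 : 1;   /* history files sort first here */
        return strcmp(a, b);             /* otherwise lexical (segment) order */
    }

    int main(void)
    {
        printf("%d\n", ready_file_cmp("00000002.history",
                                      "000000010000000000000001"));
        return 0;
    }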
@@ -793,8 +792,8 @@ HandlePgArchInterrupts(void)
if (archiveLibChanged)
{
/*
- * Call the currently loaded archive module's shutdown callback, if
- * one is defined.
+ * Call the currently loaded archive module's shutdown callback,
+ * if one is defined.
*/
call_archive_module_shutdown_callback(0, 0);
@@ -803,8 +802,8 @@ HandlePgArchInterrupts(void)
* load the new one, but there is presently no mechanism for
* unloading a library (see the comment above
* internal_load_library()). To deal with this, we simply restart
- * the archiver. The new archive module will be loaded when the new
- * archiver process starts up.
+ * the archiver. The new archive module will be loaded when the
+ * new archiver process starts up.
*/
ereport(LOG,
(errmsg("restarting archiver process because value of "
@@ -828,9 +827,8 @@ LoadArchiveLibrary(void)
memset(&ArchiveContext, 0, sizeof(ArchiveModuleCallbacks));
/*
- * If shell archiving is enabled, use our special initialization
- * function. Otherwise, load the library and call its
- * _PG_archive_module_init().
+ * If shell archiving is enabled, use our special initialization function.
+ * Otherwise, load the library and call its _PG_archive_module_init().
*/
if (XLogArchiveLibrary[0] == '\0')
archive_init = shell_archive_init;
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 8e61b3471ca..bf591f048d4 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -2859,8 +2859,8 @@ pmdie(SIGNAL_ARGS)
/*
* If we reached normal running, we go straight to waiting for
- * client backends to exit. If already in PM_STOP_BACKENDS or
- * a later state, do not change it.
+ * client backends to exit. If already in PM_STOP_BACKENDS or a
+ * later state, do not change it.
*/
if (pmState == PM_RUN || pmState == PM_HOT_STANDBY)
connsAllowed = false;
diff --git a/src/backend/postmaster/startup.c b/src/backend/postmaster/startup.c
index 29cf8f18e1a..f99186eab7d 100644
--- a/src/backend/postmaster/startup.c
+++ b/src/backend/postmaster/startup.c
@@ -75,7 +75,7 @@ static volatile sig_atomic_t startup_progress_timer_expired = false;
/*
* Time between progress updates for long-running startup operations.
*/
-int log_startup_progress_interval = 10000; /* 10 sec */
+int log_startup_progress_interval = 10000; /* 10 sec */
/* Signal handlers */
static void StartupProcTriggerHandler(SIGNAL_ARGS);
diff --git a/src/backend/postmaster/walwriter.c b/src/backend/postmaster/walwriter.c
index 77aebb244cb..e926f8c27c7 100644
--- a/src/backend/postmaster/walwriter.c
+++ b/src/backend/postmaster/walwriter.c
@@ -297,9 +297,9 @@ HandleWalWriterInterrupts(void)
/*
* Force reporting remaining WAL statistics at process exit.
*
- * Since pgstat_report_wal is invoked with 'force' is false in main loop
- * to avoid overloading the cumulative stats system, there may exist
- * unreported stats counters for the WAL writer.
+ * Since pgstat_report_wal is invoked with 'force' set to false in the
+ * main loop to avoid overloading the cumulative stats system, there
+ * may exist unreported stats counters for the WAL writer.
*/
pgstat_report_wal(true);
diff --git a/src/backend/regex/regc_pg_locale.c b/src/backend/regex/regc_pg_locale.c
index 6e84f42cb24..e1f9df09180 100644
--- a/src/backend/regex/regc_pg_locale.c
+++ b/src/backend/regex/regc_pg_locale.c
@@ -234,8 +234,8 @@ pg_set_regex_collation(Oid collation)
if (!OidIsValid(collation))
{
/*
- * This typically means that the parser could not resolve a
- * conflict of implicit collations, so report it that way.
+ * This typically means that the parser could not resolve a conflict
+ * of implicit collations, so report it that way.
*/
ereport(ERROR,
(errcode(ERRCODE_INDETERMINATE_COLLATION),
@@ -253,9 +253,9 @@ pg_set_regex_collation(Oid collation)
else
{
/*
- * NB: pg_newlocale_from_collation will fail if not HAVE_LOCALE_T;
- * the case of pg_regex_locale != 0 but not HAVE_LOCALE_T does not
- * have to be considered below.
+ * NB: pg_newlocale_from_collation will fail if not HAVE_LOCALE_T; the
+ * case of pg_regex_locale != 0 but not HAVE_LOCALE_T does not have to
+ * be considered below.
*/
pg_regex_locale = pg_newlocale_from_collation(collation);
diff --git a/src/backend/replication/backup_manifest.c b/src/backend/replication/backup_manifest.c
index 7e22f9e48cf..d47ab4c41e3 100644
--- a/src/backend/replication/backup_manifest.c
+++ b/src/backend/replication/backup_manifest.c
@@ -312,7 +312,7 @@ AddWALInfoToBackupManifest(backup_manifest_info *manifest, XLogRecPtr startptr,
* Finalize the backup manifest, and send it to the client.
*/
void
-SendBackupManifest(backup_manifest_info *manifest, bbsink * sink)
+SendBackupManifest(backup_manifest_info *manifest, bbsink *sink)
{
uint8 checksumbuf[PG_SHA256_DIGEST_LENGTH];
char checksumstringbuf[PG_SHA256_DIGEST_STRING_LENGTH];
diff --git a/src/backend/replication/basebackup_copy.c b/src/backend/replication/basebackup_copy.c
index 90daeff09ce..cabb0772402 100644
--- a/src/backend/replication/basebackup_copy.c
+++ b/src/backend/replication/basebackup_copy.c
@@ -124,18 +124,18 @@ bbsink_copystream_begin_backup(bbsink *sink)
{
bbsink_copystream *mysink = (bbsink_copystream *) sink;
bbsink_state *state = sink->bbs_state;
- char *buf;
+ char *buf;
/*
* Initialize buffer. We ultimately want to send the archive and manifest
* data by means of CopyData messages where the payload portion of each
* message begins with a type byte. However, basebackup.c expects the
* buffer to be aligned, so we can't just allocate one extra byte for the
- * type byte. Instead, allocate enough extra bytes that the portion of
- * the buffer we reveal to our callers can be aligned, while leaving room
- * to slip the type byte in just beforehand. That will allow us to ship
- * the data with a single call to pq_putmessage and without needing any
- * extra copying.
+ * type byte. Instead, allocate enough extra bytes that the portion of the
+ * buffer we reveal to our callers can be aligned, while leaving room to
+ * slip the type byte in just beforehand. That will allow us to ship the
+ * data with a single call to pq_putmessage and without needing any extra
+ * copying.
*/
buf = palloc(mysink->base.bbs_buffer_length + MAXIMUM_ALIGNOF);
mysink->msgbuffer = buf + (MAXIMUM_ALIGNOF - 1);
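
The comment being rewrapped here describes the alignment trick: over-allocate by MAXIMUM_ALIGNOF, expose an aligned region to callers, and keep one byte of headroom in front for the CopyData type byte. A small standalone sketch of the pointer arithmetic, with stand-in constants and field names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define ALIGNOF 8            /* stand-in for MAXIMUM_ALIGNOF */
    #define BUFFER_LENGTH 8192   /* stand-in for bbs_buffer_length */

    int main(void)
    {
        char *buf = malloc(BUFFER_LENGTH + ALIGNOF);
        char *msgbuffer;
        char *payload;

        if (!buf)
            return 1;
        msgbuffer = buf + (ALIGNOF - 1);   /* message starts one byte early */
        payload = buf + ALIGNOF;           /* aligned region shown to callers */

        printf("payload aligned: %s\n",
               ((uintptr_t) payload % ALIGNOF) == 0 ? "yes" : "no");
        printf("headroom before payload: %td byte(s)\n", payload - msgbuffer);

        free(buf);
        return 0;
    }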
diff --git a/src/backend/replication/basebackup_gzip.c b/src/backend/replication/basebackup_gzip.c
index 44f28ceba45..ef2b954946a 100644
--- a/src/backend/replication/basebackup_gzip.c
+++ b/src/backend/replication/basebackup_gzip.c
@@ -68,7 +68,7 @@ bbsink_gzip_new(bbsink *next, pg_compress_specification *compress)
return NULL; /* keep compiler quiet */
#else
bbsink_gzip *sink;
- int compresslevel;
+ int compresslevel;
Assert(next != NULL);
@@ -118,8 +118,8 @@ static void
bbsink_gzip_begin_archive(bbsink *sink, const char *archive_name)
{
bbsink_gzip *mysink = (bbsink_gzip *) sink;
- char *gz_archive_name;
- z_stream *zs = &mysink->zstream;
+ char *gz_archive_name;
+ z_stream *zs = &mysink->zstream;
/* Initialize compressor object. */
memset(zs, 0, sizeof(z_stream));
@@ -129,10 +129,10 @@ bbsink_gzip_begin_archive(bbsink *sink, const char *archive_name)
zs->avail_out = sink->bbs_next->bbs_buffer_length;
/*
- * We need to use deflateInit2() rather than deflateInit() here so that
- * we can request a gzip header rather than a zlib header. Otherwise, we
- * want to supply the same values that would have been used by default
- * if we had just called deflateInit().
+ * We need to use deflateInit2() rather than deflateInit() here so that we
+ * can request a gzip header rather than a zlib header. Otherwise, we want
+ * to supply the same values that would have been used by default if we
+ * had just called deflateInit().
*
* Per the documentation for deflateInit2, the third argument must be
* Z_DEFLATED; the fourth argument is the number of "window bits", by
@@ -147,9 +147,8 @@ bbsink_gzip_begin_archive(bbsink *sink, const char *archive_name)
errmsg("could not initialize compression library"));
/*
- * Add ".gz" to the archive name. Note that the pg_basebackup -z
- * produces archives named ".tar.gz" rather than ".tgz", so we match
- * that here.
+ * Add ".gz" to the archive name. Note that the pg_basebackup -z produces
+ * archives named ".tar.gz" rather than ".tgz", so we match that here.
*/
gz_archive_name = psprintf("%s.gz", archive_name);
Assert(sink->bbs_next != NULL);
@@ -172,7 +171,7 @@ static void
bbsink_gzip_archive_contents(bbsink *sink, size_t len)
{
bbsink_gzip *mysink = (bbsink_gzip *) sink;
- z_stream *zs = &mysink->zstream;
+ z_stream *zs = &mysink->zstream;
/* Compress data from input buffer. */
zs->next_in = (uint8 *) mysink->base.bbs_buffer;
@@ -180,7 +179,7 @@ bbsink_gzip_archive_contents(bbsink *sink, size_t len)
while (zs->avail_in > 0)
{
- int res;
+ int res;
/* Write output data into unused portion of output buffer. */
Assert(mysink->bytes_written < mysink->base.bbs_next->bbs_buffer_length);
@@ -230,7 +229,7 @@ static void
bbsink_gzip_end_archive(bbsink *sink)
{
bbsink_gzip *mysink = (bbsink_gzip *) sink;
- z_stream *zs = &mysink->zstream;
+ z_stream *zs = &mysink->zstream;
/* There is no more data available. */
zs->next_in = (uint8 *) mysink->base.bbs_buffer;
@@ -238,7 +237,7 @@ bbsink_gzip_end_archive(bbsink *sink)
while (1)
{
- int res;
+ int res;
/* Write output data into unused portion of output buffer. */
Assert(mysink->bytes_written < mysink->base.bbs_next->bbs_buffer_length);
@@ -248,8 +247,8 @@ bbsink_gzip_end_archive(bbsink *sink)
mysink->base.bbs_next->bbs_buffer_length - mysink->bytes_written;
/*
- * As bbsink_gzip_archive_contents, but pass Z_FINISH since there
- * is no more input.
+ * As bbsink_gzip_archive_contents, but pass Z_FINISH since there is
+ * no more input.
*/
res = deflate(zs, Z_FINISH);
if (res == Z_STREAM_ERROR)
@@ -260,8 +259,8 @@ bbsink_gzip_end_archive(bbsink *sink)
mysink->base.bbs_next->bbs_buffer_length - zs->avail_out;
/*
- * Apparently we had no data in the output buffer and deflate()
- * was not able to add any. We must be done.
+ * Apparently we had no data in the output buffer and deflate() was
+ * not able to add any. We must be done.
*/
if (mysink->bytes_written == 0)
break;
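
The deflateInit2() comment earlier in this file explains why the gzip sink asks zlib for a gzip header rather than the default zlib wrapper. A self-contained sketch of that call pattern (window bits 15 + 16 requests the gzip header; the other arguments keep deflateInit() defaults; compile with -lz, error handling reduced to a minimum):

    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    int main(void)
    {
        unsigned char input[] = "hello, archive";
        unsigned char out[256];
        z_stream zs;

        memset(&zs, 0, sizeof(zs));
        if (deflateInit2(&zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                         15 + 16,          /* 15 window bits plus gzip header */
                         8, Z_DEFAULT_STRATEGY) != Z_OK)
            return 1;

        zs.next_in = input;
        zs.avail_in = sizeof(input) - 1;
        zs.next_out = out;
        zs.avail_out = sizeof(out);

        if (deflate(&zs, Z_FINISH) != Z_STREAM_END)
            return 1;

        printf("compressed %u bytes into %lu gzip bytes\n",
               (unsigned) (sizeof(input) - 1), (unsigned long) zs.total_out);
        deflateEnd(&zs);
        return 0;
    }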
diff --git a/src/backend/replication/basebackup_lz4.c b/src/backend/replication/basebackup_lz4.c
index 65e774eff62..c9d19b6c448 100644
--- a/src/backend/replication/basebackup_lz4.c
+++ b/src/backend/replication/basebackup_lz4.c
@@ -68,7 +68,7 @@ bbsink_lz4_new(bbsink *next, pg_compress_specification *compress)
return NULL; /* keep compiler quiet */
#else
bbsink_lz4 *sink;
- int compresslevel;
+ int compresslevel;
Assert(next != NULL);
diff --git a/src/backend/replication/basebackup_server.c b/src/backend/replication/basebackup_server.c
index 54e6829d2be..9b4847d90cc 100644
--- a/src/backend/replication/basebackup_server.c
+++ b/src/backend/replication/basebackup_server.c
@@ -77,10 +77,11 @@ bbsink_server_new(bbsink *next, char *pathname)
/*
* It's not a good idea to store your backups in the same directory that
- * you're backing up. If we allowed a relative path here, that could easily
- * happen accidentally, so we don't. The user could still accomplish the
- * same thing by including the absolute path to $PGDATA in the pathname,
- * but that's likely an intentional bad decision rather than an accident.
+ * you're backing up. If we allowed a relative path here, that could
+ * easily happen accidentally, so we don't. The user could still
+ * accomplish the same thing by including the absolute path to $PGDATA in
+ * the pathname, but that's likely an intentional bad decision rather than
+ * an accident.
*/
if (!is_absolute_path(pathname))
ereport(ERROR,
@@ -90,14 +91,15 @@ bbsink_server_new(bbsink *next, char *pathname)
switch (pg_check_dir(pathname))
{
case 0:
+
/*
- * Does not exist, so create it using the same permissions we'd use
- * for a new subdirectory of the data directory itself.
+ * Does not exist, so create it using the same permissions we'd
+ * use for a new subdirectory of the data directory itself.
*/
if (MakePGDirectory(pathname) < 0)
ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not create directory \"%s\": %m", pathname)));
+ (errcode_for_file_access(),
+ errmsg("could not create directory \"%s\": %m", pathname)));
break;
case 1:
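
A compact illustration of the target-directory rules the comments above describe: relative paths are rejected outright, and a missing directory is created. This is a simplified stand-in, not the real is_absolute_path()/pg_check_dir()/MakePGDirectory() code path:

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <sys/stat.h>

    static int backup_target_prepare(const char *pathname)
    {
        if (pathname[0] != '/')
        {
            fprintf(stderr, "relative path not allowed for backup target\n");
            return -1;
        }
        if (mkdir(pathname, 0700) < 0 && errno != EEXIST)
        {
            fprintf(stderr, "could not create directory \"%s\": %s\n",
                    pathname, strerror(errno));
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        return backup_target_prepare("/tmp/pg_backup_target_demo") == 0 ? 0 : 1;
    }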
diff --git a/src/backend/replication/basebackup_target.c b/src/backend/replication/basebackup_target.c
index 243a2bacfef..9f73457320e 100644
--- a/src/backend/replication/basebackup_target.c
+++ b/src/backend/replication/basebackup_target.c
@@ -80,9 +80,9 @@ BaseBackupAddTarget(char *name,
/*
* We found one, so update it.
*
- * It is probably not a great idea to call BaseBackupAddTarget
- * for the same name multiple times, but if it happens, this
- * seems like the sanest behavior.
+ * It is probably not a great idea to call BaseBackupAddTarget for
+ * the same name multiple times, but if it happens, this seems
+ * like the sanest behavior.
*/
ttype->check_detail = check_detail;
ttype->get_sink = get_sink;
@@ -91,9 +91,9 @@ BaseBackupAddTarget(char *name,
}
/*
- * We use TopMemoryContext for allocations here to make sure that the
- * data we need doesn't vanish under us; that's also why we copy the
- * target name into a newly-allocated chunk of memory.
+ * We use TopMemoryContext for allocations here to make sure that the data
+ * we need doesn't vanish under us; that's also why we copy the target
+ * name into a newly-allocated chunk of memory.
*/
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
ttype = palloc(sizeof(BaseBackupTargetType));
diff --git a/src/backend/replication/basebackup_zstd.c b/src/backend/replication/basebackup_zstd.c
index d767b26f4e3..b23a37b29ed 100644
--- a/src/backend/replication/basebackup_zstd.c
+++ b/src/backend/replication/basebackup_zstd.c
@@ -108,9 +108,9 @@ bbsink_zstd_begin_backup(bbsink *sink)
if ((compress->options & PG_COMPRESSION_OPTION_WORKERS) != 0)
{
/*
- * On older versions of libzstd, this option does not exist, and trying
- * to set it will fail. Similarly for newer versions if they are
- * compiled without threading support.
+ * On older versions of libzstd, this option does not exist, and
+ * trying to set it will fail. Similarly for newer versions if they
+ * are compiled without threading support.
*/
ret = ZSTD_CCtx_setParameter(mysink->cctx, ZSTD_c_nbWorkers,
compress->workers);
diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c
index 6303647fe0f..aa2427ba73f 100644
--- a/src/backend/replication/logical/decode.c
+++ b/src/backend/replication/logical/decode.c
@@ -92,7 +92,7 @@ LogicalDecodingProcessRecord(LogicalDecodingContext *ctx, XLogReaderState *recor
{
XLogRecordBuffer buf;
TransactionId txid;
- RmgrData rmgr;
+ RmgrData rmgr;
buf.origptr = ctx->reader->ReadRecPtr;
buf.endptr = ctx->reader->EndRecPtr;
diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c
index 0adb2d1d665..6a4b2d43063 100644
--- a/src/backend/replication/logical/launcher.c
+++ b/src/backend/replication/logical/launcher.c
@@ -344,9 +344,9 @@ retry:
}
/*
- * We don't allow to invoke more sync workers once we have reached the sync
- * worker limit per subscription. So, just return silently as we might get
- * here because of an otherwise harmless race condition.
+ * We don't allow invoking more sync workers once we have reached the
+ * sync worker limit per subscription. So, just return silently as we
+ * might get here because of an otherwise harmless race condition.
*/
if (OidIsValid(relid) && nsyncworkers >= max_sync_workers_per_subscription)
{
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 6887dc23f61..da7bd1321cb 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -646,8 +646,8 @@ ReorderBufferTXNByXid(ReorderBuffer *rb, TransactionId xid, bool create,
}
/*
- * If the cache wasn't hit or it yielded a "does-not-exist" and we want
- * to create an entry.
+ * If the cache wasn't hit or it yielded a "does-not-exist" and we want to
+ * create an entry.
*/
/* search the lookup table */
diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c
index 49ceec3bdc8..61aee61b8ee 100644
--- a/src/backend/replication/logical/tablesync.c
+++ b/src/backend/replication/logical/tablesync.c
@@ -786,11 +786,11 @@ fetch_remote_table_info(char *nspname, char *relname,
/*
* Fetch info about column lists for the relation (from all the
- * publications). We unnest the int2vector values, because that
- * makes it easier to combine lists by simply adding the attnums
- * to a new bitmap (without having to parse the int2vector data).
- * This preserves NULL values, so that if one of the publications
- * has no column list, we'll know that.
+ * publications). We unnest the int2vector values, because that makes
+ * it easier to combine lists by simply adding the attnums to a new
+ * bitmap (without having to parse the int2vector data). This
+ * preserves NULL values, so that if one of the publications has no
+ * column list, we'll know that.
*/
resetStringInfo(&cmd);
appendStringInfo(&cmd,
@@ -816,15 +816,15 @@ fetch_remote_table_info(char *nspname, char *relname,
nspname, relname, pubres->err)));
/*
- * Merge the column lists (from different publications) by creating
- * a single bitmap with all the attnums. If we find a NULL value,
- * that means one of the publications has no column list for the
- * table we're syncing.
+ * Merge the column lists (from different publications) by creating a
+ * single bitmap with all the attnums. If we find a NULL value, that
+ * means one of the publications has no column list for the table
+ * we're syncing.
*/
slot = MakeSingleTupleTableSlot(pubres->tupledesc, &TTSOpsMinimalTuple);
while (tuplestore_gettupleslot(pubres->tuplestore, true, false, slot))
{
- Datum cfval = slot_getattr(slot, 1, &isnull);
+ Datum cfval = slot_getattr(slot, 1, &isnull);
/* NULL means empty column list, so we're done. */
if (isnull)
@@ -835,7 +835,7 @@ fetch_remote_table_info(char *nspname, char *relname,
}
included_cols = bms_add_member(included_cols,
- DatumGetInt16(cfval));
+ DatumGetInt16(cfval));
ExecClearTuple(slot);
}
@@ -1056,8 +1056,8 @@ copy_table(Relation rel)
quote_qualified_identifier(lrel.nspname, lrel.relname));
/*
- * XXX Do we need to list the columns in all cases? Maybe we're replicating
- * all columns?
+ * XXX Do we need to list the columns in all cases? Maybe we're
+ * replicating all columns?
*/
for (int i = 0; i < lrel.natts; i++)
{
@@ -1321,10 +1321,10 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
/*
* COPY FROM does not honor RLS policies. That is not a problem for
- * subscriptions owned by roles with BYPASSRLS privilege (or superuser, who
- * has it implicitly), but other roles should not be able to circumvent
- * RLS. Disallow logical replication into RLS enabled relations for such
- * roles.
+ * subscriptions owned by roles with BYPASSRLS privilege (or superuser,
+ * who has it implicitly), but other roles should not be able to
+ * circumvent RLS. Disallow logical replication into RLS enabled
+ * relations for such roles.
*/
if (check_enable_rls(RelationGetRelid(rel), InvalidOid, false) == RLS_ENABLED)
ereport(ERROR,
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index 7da7823c352..725a21b55ec 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -1608,8 +1608,8 @@ GetRelationIdentityOrPK(Relation rel)
static void
TargetPrivilegesCheck(Relation rel, AclMode mode)
{
- Oid relid;
- AclResult aclresult;
+ Oid relid;
+ AclResult aclresult;
relid = RelationGetRelid(rel);
aclresult = pg_class_aclcheck(relid, GetUserId(), mode);
diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c
index 406ad84e1d6..42c06af2391 100644
--- a/src/backend/replication/pgoutput/pgoutput.c
+++ b/src/backend/replication/pgoutput/pgoutput.c
@@ -174,8 +174,8 @@ typedef struct RelationSyncEntry
Bitmapset *columns;
/*
- * Private context to store additional data for this entry - state for
- * the row filter expressions, column list, etc.
+ * Private context to store additional data for this entry - state for the
+ * row filter expressions, column list, etc.
*/
MemoryContext entry_cxt;
} RelationSyncEntry;
@@ -206,9 +206,8 @@ typedef struct RelationSyncEntry
*/
typedef struct PGOutputTxnData
{
- bool sent_begin_txn; /* flag indicating whether BEGIN has
- * been sent */
-} PGOutputTxnData;
+ bool sent_begin_txn; /* flag indicating whether BEGIN has been sent */
+} PGOutputTxnData;
/* Map used to remember which relation schemas we sent. */
static HTAB *RelationSyncCache = NULL;
@@ -511,9 +510,9 @@ pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
* using bandwidth on something with little/no use for logical replication.
*/
static void
-pgoutput_begin_txn(LogicalDecodingContext * ctx, ReorderBufferTXN * txn)
+pgoutput_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
{
- PGOutputTxnData *txndata = MemoryContextAllocZero(ctx->context,
+ PGOutputTxnData *txndata = MemoryContextAllocZero(ctx->context,
sizeof(PGOutputTxnData));
txn->output_plugin_private = txndata;
@@ -987,7 +986,8 @@ pgoutput_column_list_init(PGOutputData *data, List *publications,
*
* All the given publication-table mappings must be checked.
*
- * Multiple publications might have multiple column lists for this relation.
+ * Multiple publications might have multiple column lists for this
+ * relation.
*
* FOR ALL TABLES and FOR ALL TABLES IN SCHEMA implies "don't use column
* list" so it takes precedence.
@@ -1005,8 +1005,9 @@ pgoutput_column_list_init(PGOutputData *data, List *publications,
bool pub_no_list = true;
/*
- * If the publication is FOR ALL TABLES then it is treated the same as if
- * there are no column lists (even if other publications have a list).
+ * If the publication is FOR ALL TABLES then it is treated the same as
+ * if there are no column lists (even if other publications have a
+ * list).
*/
if (!pub->alltables)
{
@@ -1014,8 +1015,8 @@ pgoutput_column_list_init(PGOutputData *data, List *publications,
* Check for the presence of a column list in this publication.
*
* Note: If we find no pg_publication_rel row, it's a publication
- * defined for a whole schema, so it can't have a column list, just
- * like a FOR ALL TABLES publication.
+ * defined for a whole schema, so it can't have a column list,
+ * just like a FOR ALL TABLES publication.
*/
cftuple = SearchSysCache2(PUBLICATIONRELMAP,
ObjectIdGetDatum(entry->publish_as_relid),
@@ -1221,9 +1222,9 @@ pgoutput_row_filter(Relation relation, TupleTableSlot *old_slot,
* For updates, we can have only a new tuple when none of the replica
* identity columns changed and none of those columns have external data
* but we still need to evaluate the row filter for the new tuple as the
- * existing values of those columns might not match the filter. Also, users
- * can use constant expressions in the row filter, so we anyway need to
- * evaluate it for the new tuple.
+ * existing values of those columns might not match the filter. Also,
+ * users can use constant expressions in the row filter, so we anyway need
+ * to evaluate it for the new tuple.
*
* For deletes, we only have the old tuple.
*/
@@ -1674,8 +1675,7 @@ pgoutput_message(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
xid = txn->xid;
/*
- * Output BEGIN if we haven't yet. Avoid for non-transactional
- * messages.
+ * Output BEGIN if we haven't yet. Avoid for non-transactional messages.
*/
if (transactional)
{
@@ -2079,15 +2079,15 @@ get_rel_sync_entry(PGOutputData *data, Relation relation)
/*
* Under what relid should we publish changes in this publication?
- * We'll use the top-most relid across all publications. Also track
- * the ancestor level for this publication.
+ * We'll use the top-most relid across all publications. Also
+ * track the ancestor level for this publication.
*/
- Oid pub_relid = relid;
- int ancestor_level = 0;
+ Oid pub_relid = relid;
+ int ancestor_level = 0;
/*
- * If this is a FOR ALL TABLES publication, pick the partition root
- * and set the ancestor level accordingly.
+ * If this is a FOR ALL TABLES publication, pick the partition
+ * root and set the ancestor level accordingly.
*/
if (pub->alltables)
{
@@ -2156,18 +2156,18 @@ get_rel_sync_entry(PGOutputData *data, Relation relation)
/*
* We want to publish the changes as the top-most ancestor
- * across all publications. So we need to check if the
- * already calculated level is higher than the new one. If
- * yes, we can ignore the new value (as it's a child).
- * Otherwise the new value is an ancestor, so we keep it.
+ * across all publications. So we need to check if the already
+ * calculated level is higher than the new one. If yes, we can
+ * ignore the new value (as it's a child). Otherwise the new
+ * value is an ancestor, so we keep it.
*/
if (publish_ancestor_level > ancestor_level)
continue;
/*
- * If we found an ancestor higher up in the tree, discard
- * the list of publications through which we replicate it,
- * and use the new ancestor.
+ * If we found an ancestor higher up in the tree, discard the
+ * list of publications through which we replicate it, and use
+ * the new ancestor.
*/
if (publish_ancestor_level < ancestor_level)
{
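
The two comments above encode the ancestor-selection rule: keep the highest ancestor level seen so far and ignore lower ones, so changes are published as the top-most ancestor. A tiny standalone illustration of that rule with made-up level values:

    #include <stdio.h>

    int main(void)
    {
        int publish_ancestor_level = 0;
        int candidate_levels[] = {1, 3, 2};

        for (int i = 0; i < 3; i++)
        {
            int ancestor_level = candidate_levels[i];

            if (publish_ancestor_level > ancestor_level)
                continue;                               /* already higher */
            if (publish_ancestor_level < ancestor_level)
                publish_ancestor_level = ancestor_level; /* new ancestor wins */
        }
        printf("publish at ancestor level %d\n", publish_ancestor_level);
        return 0;
    }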
diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c
index 5c778f5333b..e5c2102bcd5 100644
--- a/src/backend/replication/slot.c
+++ b/src/backend/replication/slot.c
@@ -504,8 +504,8 @@ retry:
MyReplicationSlot = s;
/*
- * The call to pgstat_acquire_replslot() protects against stats for
- * a different slot, from before a restart or such, being present during
+ * The call to pgstat_acquire_replslot() protects against stats for a
+ * different slot, from before a restart or such, being present during
* pgstat_report_replslot().
*/
if (SlotIsLogical(s))
diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c
index 3c9411e2213..b369d28a806 100644
--- a/src/backend/replication/walreceiver.c
+++ b/src/backend/replication/walreceiver.c
@@ -1406,9 +1406,9 @@ pg_stat_get_wal_receiver(PG_FUNCTION_ARGS)
if (!has_privs_of_role(GetUserId(), ROLE_PG_READ_ALL_STATS))
{
/*
- * Only superusers and roles with privileges of pg_read_all_stats
- * can see details. Other users only get the pid value to know whether
- * it is a WAL receiver, but no details.
+ * Only superusers and roles with privileges of pg_read_all_stats can
+ * see details. Other users only get the pid value to know whether it
+ * is a WAL receiver, but no details.
*/
MemSet(&nulls[1], true, sizeof(bool) * (tupdesc->natts - 1));
}
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index c6c196b2fab..e42671722a8 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -1505,9 +1505,9 @@ WalSndUpdateProgress(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId
* When skipping empty transactions in synchronous replication, we send a
* keepalive message to avoid delaying such transactions.
*
- * It is okay to check sync_standbys_defined flag without lock here as
- * in the worst case we will just send an extra keepalive message when it
- * is really not required.
+ * It is okay to check sync_standbys_defined flag without lock here as in
+ * the worst case we will just send an extra keepalive message when it is
+ * really not required.
*/
if (skipped_xact &&
SyncRepRequested() &&
diff --git a/src/backend/statistics/dependencies.c b/src/backend/statistics/dependencies.c
index b6f31849616..c1c27e67d47 100644
--- a/src/backend/statistics/dependencies.c
+++ b/src/backend/statistics/dependencies.c
@@ -354,7 +354,7 @@ statext_dependencies_build(StatsBuildData *data)
/* result */
MVDependencies *dependencies = NULL;
- MemoryContext cxt;
+ MemoryContext cxt;
Assert(data->nattnums >= 2);
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index e02ea3a977c..ae13011d275 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -673,9 +673,8 @@ ReadRecentBuffer(RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum,
{
/*
* It's now safe to pin the buffer. We can't pin first and ask
- * questions later, because it might confuse code paths
- * like InvalidateBuffer() if we pinned a random non-matching
- * buffer.
+ * questions later, because it might confuse code paths like
+ * InvalidateBuffer() if we pinned a random non-matching buffer.
*/
if (have_private_ref)
PinBuffer(bufHdr, NULL); /* bump pin count */
@@ -2945,10 +2944,10 @@ RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
if (RELKIND_HAS_TABLE_AM(relation->rd_rel->relkind))
{
/*
- * Not every table AM uses BLCKSZ wide fixed size blocks.
- * Therefore tableam returns the size in bytes - but for the
- * purpose of this routine, we want the number of blocks.
- * Therefore divide, rounding up.
+ * Not every table AM uses BLCKSZ wide fixed size blocks. Therefore
+ * tableam returns the size in bytes - but for the purpose of this
+ * routine, we want the number of blocks. Therefore divide, rounding
+ * up.
*/
uint64 szbytes;
@@ -2958,7 +2957,7 @@ RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
}
else if (RELKIND_HAS_STORAGE(relation->rd_rel->relkind))
{
- return smgrnblocks(RelationGetSmgr(relation), forkNum);
+ return smgrnblocks(RelationGetSmgr(relation), forkNum);
}
else
Assert(false);
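
The reworded comment above boils down to a rounding-up division from a byte count to BLCKSZ-sized blocks. A minimal sketch of that conversion:

    #include <stdio.h>
    #include <stdint.h>

    #define BLCKSZ 8192

    static uint64_t bytes_to_blocks(uint64_t szbytes)
    {
        /* divide by the block size, rounding any partial block up */
        return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long) bytes_to_blocks(0));      /* 0 */
        printf("%llu\n", (unsigned long long) bytes_to_blocks(1));      /* 1 */
        printf("%llu\n", (unsigned long long) bytes_to_blocks(16384));  /* 2 */
        printf("%llu\n", (unsigned long long) bytes_to_blocks(16385));  /* 3 */
        return 0;
    }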
@@ -3707,9 +3706,9 @@ RelationCopyStorageUsingBuffer(Relation src, Relation dst, ForkNumber forkNum,
BufferAccessStrategy bstrategy_dst;
/*
- * In general, we want to write WAL whenever wal_level > 'minimal', but
- * we can skip it when copying any fork of an unlogged relation other
- * than the init fork.
+ * In general, we want to write WAL whenever wal_level > 'minimal', but we
+ * can skip it when copying any fork of an unlogged relation other than
+ * the init fork.
*/
use_wal = XLogIsNeeded() && (permanent || forkNum == INIT_FORKNUM);
@@ -3779,9 +3778,9 @@ void
CreateAndCopyRelationData(RelFileNode src_rnode, RelFileNode dst_rnode,
bool permanent)
{
- Relation src_rel;
- Relation dst_rel;
- char relpersistence;
+ Relation src_rel;
+ Relation dst_rel;
+ char relpersistence;
/* Set the relpersistence. */
relpersistence = permanent ?
@@ -3789,9 +3788,9 @@ CreateAndCopyRelationData(RelFileNode src_rnode, RelFileNode dst_rnode,
/*
* We can't use a real relcache entry for a relation in some other
- * database, but since we're only going to access the fields related
- * to physical storage, a fake one is good enough. If we didn't do this
- * and used the smgr layer directly, we would have to worry about
+ * database, but since we're only going to access the fields related to
+ * physical storage, a fake one is good enough. If we didn't do this and
+ * used the smgr layer directly, we would have to worry about
* invalidations.
*/
src_rel = CreateFakeRelcacheEntry(src_rnode);
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 25c310f6757..ca22336e35d 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -1172,8 +1172,8 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
*
* We have to sort them logically, because in KnownAssignedXidsAdd we
* call TransactionIdFollowsOrEquals and so on. But we know these XIDs
- * come from RUNNING_XACTS, which means there are only normal XIDs from
- * the same epoch, so this is safe.
+ * come from RUNNING_XACTS, which means there are only normal XIDs
+ * from the same epoch, so this is safe.
*/
qsort(xids, nxids, sizeof(TransactionId), xidLogicalComparator);
diff --git a/src/backend/storage/ipc/shm_mq.c b/src/backend/storage/ipc/shm_mq.c
index 603cf9b0fa7..6139c622e0b 100644
--- a/src/backend/storage/ipc/shm_mq.c
+++ b/src/backend/storage/ipc/shm_mq.c
@@ -534,9 +534,9 @@ shm_mq_sendv(shm_mq_handle *mqh, shm_mq_iovec *iov, int iovcnt, bool nowait,
}
/*
- * If the caller has requested force flush or we have written more than 1/4
- * of the ring size, mark it as written in shared memory and notify the
- * receiver.
+ * If the caller has requested force flush or we have written more than
+ * 1/4 of the ring size, mark it as written in shared memory and notify
+ * the receiver.
*/
if (force_flush || mqh->mqh_send_pending > (mq->mq_ring_size >> 2))
{
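
The flush condition shown here notifies the receiver either on explicit request or once more than a quarter of the ring has been written but not yet reported. A standalone sketch of the same threshold test:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool should_flush(bool force_flush, uint64_t send_pending,
                             uint64_t ring_size)
    {
        return force_flush || send_pending > (ring_size >> 2);
    }

    int main(void)
    {
        uint64_t ring_size = 65536;

        printf("%d\n", should_flush(false, 16384, ring_size)); /* 0: exactly 1/4 */
        printf("%d\n", should_flush(false, 16385, ring_size)); /* 1: just over */
        printf("%d\n", should_flush(true, 0, ring_size));      /* 1: forced */
        return 0;
    }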
diff --git a/src/backend/storage/ipc/sinvaladt.c b/src/backend/storage/ipc/sinvaladt.c
index 2861c03e04b..59310b708fb 100644
--- a/src/backend/storage/ipc/sinvaladt.c
+++ b/src/backend/storage/ipc/sinvaladt.c
@@ -208,10 +208,11 @@ SInvalShmemSize(void)
/*
* In Hot Standby mode, the startup process requests a procState array
- * slot using InitRecoveryTransactionEnvironment(). Even though MaxBackends
- * doesn't account for the startup process, it is guaranteed to get a
- * free slot. This is because the autovacuum launcher and worker processes,
- * which are included in MaxBackends, are not started in Hot Standby mode.
+ * slot using InitRecoveryTransactionEnvironment(). Even though
+ * MaxBackends doesn't account for the startup process, it is guaranteed
+ * to get a free slot. This is because the autovacuum launcher and worker
+ * processes, which are included in MaxBackends, are not started in Hot
+ * Standby mode.
*/
size = add_size(size, mul_size(sizeof(ProcState), MaxBackends));
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index cc15396789b..a3d367db511 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -795,7 +795,7 @@ PageRepairFragmentation(Page page)
if (finalusedlp != nline)
{
/* The last line pointer is not the last used line pointer */
- int nunusedend = nline - finalusedlp;
+ int nunusedend = nline - finalusedlp;
Assert(nunused >= nunusedend && nunusedend > 0);
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 304cce135aa..8b6b5bbaaab 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -655,7 +655,7 @@ pg_analyze_and_rewrite_fixedparams(RawStmt *parsetree,
ResetUsage();
query = parse_analyze_fixedparams(parsetree, query_string, paramTypes, numParams,
- queryEnv);
+ queryEnv);
if (log_parser_stats)
ShowUsage("PARSE ANALYSIS STATISTICS");
@@ -694,7 +694,7 @@ pg_analyze_and_rewrite_varparams(RawStmt *parsetree,
ResetUsage();
query = parse_analyze_varparams(parsetree, query_string, paramTypes, numParams,
- queryEnv);
+ queryEnv);
/*
* Check all parameter types got determined.
@@ -1164,7 +1164,7 @@ exec_simple_query(const char *query_string)
oldcontext = MemoryContextSwitchTo(MessageContext);
querytree_list = pg_analyze_and_rewrite_fixedparams(parsetree, query_string,
- NULL, 0, NULL);
+ NULL, 0, NULL);
plantree_list = pg_plan_queries(querytree_list, query_string,
CURSOR_OPT_PARALLEL_OK, NULL);
@@ -4377,11 +4377,12 @@ PostgresMain(const char *dbname, const char *username)
* Note: this includes fflush()'ing the last of the prior output.
*
* This is also a good time to flush out collected statistics to the
- * cumulative stats system, and to update the PS stats display. We avoid doing
- * those every time through the message loop because it'd slow down
- * processing of batched messages, and because we don't want to report
- * uncommitted updates (that confuses autovacuum). The notification
- * processor wants a call too, if we are not in a transaction block.
+ * cumulative stats system, and to update the PS stats display. We
+ * avoid doing those every time through the message loop because it'd
+ * slow down processing of batched messages, and because we don't want
+ * to report uncommitted updates (that confuses autovacuum). The
+ * notification processor wants a call too, if we are not in a
+ * transaction block.
*
* Also, if an idle timeout is enabled, start the timer for that.
*/
@@ -4415,7 +4416,7 @@ PostgresMain(const char *dbname, const char *username)
}
else
{
- long stats_timeout;
+ long stats_timeout;
/*
* Process incoming notifies (including self-notifies), if
@@ -4470,8 +4471,9 @@ PostgresMain(const char *dbname, const char *username)
/*
* (4) turn off the idle-in-transaction, idle-session and
- * idle-stats-update timeouts if active. We do this before step (5) so
- * that any last-moment timeout is certain to be detected in step (5).
+ * idle-stats-update timeouts if active. We do this before step (5)
+ * so that any last-moment timeout is certain to be detected in step
+ * (5).
*
* At most one of these timeouts will be active, so there's no need to
* worry about combining the timeout.c calls into one.
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index 78e951a6bca..2570e5e6301 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -3996,7 +3996,8 @@ hash_array(PG_FUNCTION_ARGS)
/*
* Make fake type cache entry structure. Note that we can't just
- * modify typentry, since that points directly into the type cache.
+ * modify typentry, since that points directly into the type
+ * cache.
*/
record_typentry = palloc0(sizeof(*record_typentry));
record_typentry->type_id = element_type;
diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c
index 0576764ac4b..b4a2c8d2197 100644
--- a/src/backend/utils/adt/dbsize.c
+++ b/src/backend/utils/adt/dbsize.c
@@ -112,8 +112,8 @@ calculate_database_size(Oid dbOid)
AclResult aclresult;
/*
- * User must have connect privilege for target database or have privileges of
- * pg_read_all_stats
+ * User must have connect privilege for target database or have privileges
+ * of pg_read_all_stats
*/
aclresult = pg_database_aclcheck(dbOid, GetUserId(), ACL_CONNECT);
if (aclresult != ACLCHECK_OK &&
@@ -196,9 +196,9 @@ calculate_tablespace_size(Oid tblspcOid)
AclResult aclresult;
/*
- * User must have privileges of pg_read_all_stats or have CREATE privilege for
- * target tablespace, either explicitly granted or implicitly because it
- * is default for current database.
+ * User must have privileges of pg_read_all_stats or have CREATE privilege
+ * for target tablespace, either explicitly granted or implicitly because
+ * it is default for current database.
*/
if (tblspcOid != MyDatabaseTableSpace &&
!has_privs_of_role(GetUserId(), ROLE_PG_READ_ALL_STATS))
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 97a4544ffc6..e909c1a200c 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -898,7 +898,7 @@ static const KeyWord DCH_keywords[] = {
{"month", 5, DCH_month, false, FROM_CHAR_DATE_GREGORIAN},
{"mon", 3, DCH_mon, false, FROM_CHAR_DATE_GREGORIAN},
{"ms", 2, DCH_MS, true, FROM_CHAR_DATE_NONE},
- {"of", 2, DCH_OF, false, FROM_CHAR_DATE_NONE}, /* o */
+ {"of", 2, DCH_OF, false, FROM_CHAR_DATE_NONE}, /* o */
{"p.m.", 4, DCH_p_m, false, FROM_CHAR_DATE_NONE}, /* p */
{"pm", 2, DCH_pm, false, FROM_CHAR_DATE_NONE},
{"q", 1, DCH_Q, true, FROM_CHAR_DATE_NONE}, /* q */
@@ -906,7 +906,7 @@ static const KeyWord DCH_keywords[] = {
{"sssss", 5, DCH_SSSS, true, FROM_CHAR_DATE_NONE}, /* s */
{"ssss", 4, DCH_SSSS, true, FROM_CHAR_DATE_NONE},
{"ss", 2, DCH_SS, true, FROM_CHAR_DATE_NONE},
- {"tzh", 3, DCH_TZH, false, FROM_CHAR_DATE_NONE}, /* t */
+ {"tzh", 3, DCH_TZH, false, FROM_CHAR_DATE_NONE}, /* t */
{"tzm", 3, DCH_TZM, true, FROM_CHAR_DATE_NONE},
{"tz", 2, DCH_tz, false, FROM_CHAR_DATE_NONE},
{"us", 2, DCH_US, true, FROM_CHAR_DATE_NONE}, /* u */
@@ -1675,8 +1675,8 @@ str_tolower(const char *buff, size_t nbytes, Oid collid)
if (!OidIsValid(collid))
{
/*
- * This typically means that the parser could not resolve a
- * conflict of implicit collations, so report it that way.
+ * This typically means that the parser could not resolve a conflict
+ * of implicit collations, so report it that way.
*/
ereport(ERROR,
(errcode(ERRCODE_INDETERMINATE_COLLATION),
@@ -1797,8 +1797,8 @@ str_toupper(const char *buff, size_t nbytes, Oid collid)
if (!OidIsValid(collid))
{
/*
- * This typically means that the parser could not resolve a
- * conflict of implicit collations, so report it that way.
+ * This typically means that the parser could not resolve a conflict
+ * of implicit collations, so report it that way.
*/
ereport(ERROR,
(errcode(ERRCODE_INDETERMINATE_COLLATION),
@@ -1920,8 +1920,8 @@ str_initcap(const char *buff, size_t nbytes, Oid collid)
if (!OidIsValid(collid))
{
/*
- * This typically means that the parser could not resolve a
- * conflict of implicit collations, so report it that way.
+ * This typically means that the parser could not resolve a conflict
+ * of implicit collations, so report it that way.
*/
ereport(ERROR,
(errcode(ERRCODE_INDETERMINATE_COLLATION),
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index 63649ba7351..553cc25eb9d 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -44,9 +44,9 @@ typedef struct JsonUniqueHashEntry
/* Context for key uniqueness check in builder functions */
typedef struct JsonUniqueBuilderState
{
- JsonUniqueCheckState check; /* unique check */
+ JsonUniqueCheckState check; /* unique check */
StringInfoData skipped_keys; /* skipped keys with NULL values */
- MemoryContext mcxt; /* context for saving skipped keys */
+ MemoryContext mcxt; /* context for saving skipped keys */
} JsonUniqueBuilderState;
/* Element of object stack for key uniqueness check during json parsing */
@@ -774,10 +774,10 @@ to_json_is_immutable(Oid typoid)
return false;
case JSONTYPE_ARRAY:
- return false; /* TODO recurse into elements */
+ return false; /* TODO recurse into elements */
case JSONTYPE_COMPOSITE:
- return false; /* TODO recurse into fields */
+ return false; /* TODO recurse into fields */
case JSONTYPE_NUMERIC:
case JSONTYPE_CAST:
@@ -938,7 +938,7 @@ static uint32
json_unique_hash(const void *key, Size keysize)
{
const JsonUniqueHashEntry *entry = (JsonUniqueHashEntry *) key;
- uint32 hash = hash_bytes_uint32(entry->object_id);
+ uint32 hash = hash_bytes_uint32(entry->object_id);
hash ^= hash_bytes((const unsigned char *) entry->key, entry->key_len);
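
The hash shown above combines a hash of the object id with a hash of the key bytes via XOR. A self-contained illustration of that combining step, with FNV-1a standing in for hash_bytes()/hash_bytes_uint32():

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    static uint32_t fnv1a(const unsigned char *data, size_t len)
    {
        uint32_t h = 2166136261u;

        for (size_t i = 0; i < len; i++)
        {
            h ^= data[i];
            h *= 16777619u;
        }
        return h;
    }

    int main(void)
    {
        uint32_t    object_id = 42;
        const char *key = "name";
        uint32_t    hash;

        hash = fnv1a((const unsigned char *) &object_id, sizeof(object_id));
        hash ^= fnv1a((const unsigned char *) key, strlen(key));

        printf("combined hash: %u\n", hash);
        return 0;
    }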
@@ -1011,6 +1011,7 @@ json_unique_builder_get_skipped_keys(JsonUniqueBuilderState *cxt)
if (!out->data)
{
MemoryContext oldcxt = MemoryContextSwitchTo(cxt->mcxt);
+
initStringInfo(out);
MemoryContextSwitchTo(oldcxt);
}
@@ -1116,8 +1117,8 @@ json_object_agg_transfn_worker(FunctionCallInfo fcinfo,
out = state->str;
/*
- * Append comma delimiter only if we have already outputted some fields
- * after the initial string "{ ".
+ * Append comma delimiter only if we have already outputted some
+ * fields after the initial string "{ ".
*/
if (out->len > 2)
appendStringInfoString(out, ", ");
@@ -1285,7 +1286,7 @@ json_build_object_worker(int nargs, Datum *args, bool *nulls, Oid *types,
if (nulls[i])
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("argument %d cannot be null", i + 1),
+ errmsg("argument %d cannot be null", i + 1),
errhint("Object keys should be text.")));
/* save key offset before key appending */
@@ -1327,6 +1328,7 @@ json_build_object(PG_FUNCTION_ARGS)
Datum *args;
bool *nulls;
Oid *types;
+
/* build argument values to build the object */
int nargs = extract_variadic_args(fcinfo, 0, true,
&args, &types, &nulls);
@@ -1382,6 +1384,7 @@ json_build_array(PG_FUNCTION_ARGS)
Datum *args;
bool *nulls;
Oid *types;
+
/* build argument values to build the object */
int nargs = extract_variadic_args(fcinfo, 0, true,
&args, &types, &nulls);
@@ -1706,7 +1709,7 @@ json_validate(text *json, bool check_unique_keys, bool throw_error)
if (throw_error)
json_ereport_error(result, lex);
- return false; /* invalid json */
+ return false; /* invalid json */
}
if (check_unique_keys && !state.unique)
@@ -1716,10 +1719,10 @@ json_validate(text *json, bool check_unique_keys, bool throw_error)
(errcode(ERRCODE_DUPLICATE_JSON_OBJECT_KEY_VALUE),
errmsg("duplicate JSON object key value")));
- return false; /* not unique keys */
+ return false; /* not unique keys */
}
- return true; /* ok */
+ return true; /* ok */
}
/*
diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c
index 26d81366c9f..39355e242d2 100644
--- a/src/backend/utils/adt/jsonb.c
+++ b/src/backend/utils/adt/jsonb.c
@@ -1148,10 +1148,10 @@ to_jsonb_is_immutable(Oid typoid)
return false;
case JSONBTYPE_ARRAY:
- return false; /* TODO recurse into elements */
+ return false; /* TODO recurse into elements */
case JSONBTYPE_COMPOSITE:
- return false; /* TODO recurse into fields */
+ return false; /* TODO recurse into fields */
case JSONBTYPE_NUMERIC:
case JSONBTYPE_JSONCAST:
@@ -1240,6 +1240,7 @@ jsonb_build_object(PG_FUNCTION_ARGS)
Datum *args;
bool *nulls;
Oid *types;
+
/* build argument values to build the object */
int nargs = extract_variadic_args(fcinfo, 0, true,
&args, &types, &nulls);
@@ -1299,6 +1300,7 @@ jsonb_build_array(PG_FUNCTION_ARGS)
Datum *args;
bool *nulls;
Oid *types;
+
/* build argument values to build the object */
int nargs = extract_variadic_args(fcinfo, 0, true,
&args, &types, &nulls);
@@ -2229,7 +2231,7 @@ jsonb_float8(PG_FUNCTION_ARGS)
Jsonb *
JsonbMakeEmptyArray(void)
{
- JsonbValue jbv;
+ JsonbValue jbv;
jbv.type = jbvArray;
jbv.val.array.elems = NULL;
@@ -2245,7 +2247,7 @@ JsonbMakeEmptyArray(void)
Jsonb *
JsonbMakeEmptyObject(void)
{
- JsonbValue jbv;
+ JsonbValue jbv;
jbv.type = jbvObject;
jbv.val.object.pairs = NULL;
@@ -2272,7 +2274,7 @@ JsonbUnquote(Jsonb *jb)
return pstrdup(v.val.boolean ? "true" : "false");
else if (v.type == jbvNumeric)
return DatumGetCString(DirectFunctionCall1(numeric_out,
- PointerGetDatum(v.val.numeric)));
+ PointerGetDatum(v.val.numeric)));
else if (v.type == jbvNull)
return pstrdup("null");
else
diff --git a/src/backend/utils/adt/jsonb_util.c b/src/backend/utils/adt/jsonb_util.c
index 21d874c098a..5318eda9cfb 100644
--- a/src/backend/utils/adt/jsonb_util.c
+++ b/src/backend/utils/adt/jsonb_util.c
@@ -1959,7 +1959,8 @@ uniqueifyJsonbObject(JsonbValue *object, bool unique_keys, bool skip_nulls)
if (hasNonUniq || skip_nulls)
{
- JsonbPair *ptr, *res;
+ JsonbPair *ptr,
+ *res;
while (skip_nulls && object->val.object.nPairs > 0 &&
object->val.object.pairs->value.type == jbvNull)
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index d1356d64166..d427bdfbe0d 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -3139,7 +3139,7 @@ Datum
json_populate_type(Datum json_val, Oid json_type, Oid typid, int32 typmod,
void **cache, MemoryContext mcxt, bool *isnull)
{
- JsValue jsv = { 0 };
+ JsValue jsv = {0};
JsonbValue jbv;
jsv.is_json = json_type == JSONOID;
@@ -3157,7 +3157,8 @@ json_populate_type(Datum json_val, Oid json_type, Oid typid, int32 typmod,
jsv.val.json.str = VARDATA_ANY(json);
jsv.val.json.len = VARSIZE_ANY_EXHDR(json);
- jsv.val.json.type = JSON_TOKEN_INVALID; /* not used in populate_composite() */
+ jsv.val.json.type = JSON_TOKEN_INVALID; /* not used in
+ * populate_composite() */
}
else
{
@@ -3174,7 +3175,7 @@ json_populate_type(Datum json_val, Oid json_type, Oid typid, int32 typmod,
if (!*cache)
*cache = MemoryContextAllocZero(mcxt, sizeof(ColumnIOData));
- return populate_record_field(*cache , typid, typmod, NULL, mcxt,
+ return populate_record_field(*cache, typid, typmod, NULL, mcxt,
PointerGetDatum(NULL), &jsv, isnull);
}
diff --git a/src/backend/utils/adt/jsonpath.c b/src/backend/utils/adt/jsonpath.c
index 0ac14153aae..da9df4ae766 100644
--- a/src/backend/utils/adt/jsonpath.c
+++ b/src/backend/utils/adt/jsonpath.c
@@ -1094,7 +1094,7 @@ typedef struct JsonPathMutableContext
{
List *varnames; /* list of variable names */
List *varexprs; /* list of variable expressions */
- JsonPathDatatypeStatus current; /* status of @ item */
+ JsonPathDatatypeStatus current; /* status of @ item */
bool lax; /* jsonpath is lax or strict */
bool mutable; /* resulting mutability status */
} JsonPathMutableContext;
@@ -1282,18 +1282,18 @@ jspIsMutableWalker(JsonPathItem *jpi, JsonPathMutableContext *cxt)
jspIsMutableWalker(&arg, cxt);
break;
- /* literals */
+ /* literals */
case jpiNull:
case jpiString:
case jpiNumeric:
case jpiBool:
- /* accessors */
+ /* accessors */
case jpiKey:
case jpiAnyKey:
- /* special items */
+ /* special items */
case jpiSubscript:
case jpiLast:
- /* item methods */
+ /* item methods */
case jpiType:
case jpiSize:
case jpiAbs:
diff --git a/src/backend/utils/adt/jsonpath_exec.c b/src/backend/utils/adt/jsonpath_exec.c
index 2544c6b1551..0943a381bac 100644
--- a/src/backend/utils/adt/jsonpath_exec.c
+++ b/src/backend/utils/adt/jsonpath_exec.c
@@ -288,9 +288,9 @@ static void getJsonPathItem(JsonPathExecContext *cxt, JsonPathItem *item,
JsonbValue *value);
static void getJsonPathVariable(JsonPathExecContext *cxt,
JsonPathItem *variable, JsonbValue *value);
-static int getJsonPathVariableFromJsonb(void *varsJsonb, char *varName,
- int varNameLen, JsonbValue *val,
- JsonbValue *baseObject);
+static int getJsonPathVariableFromJsonb(void *varsJsonb, char *varName,
+ int varNameLen, JsonbValue *val,
+ JsonbValue *baseObject);
static int JsonbArraySize(JsonbValue *jb);
static JsonPathBool executeComparison(JsonPathItem *cmp, JsonbValue *lv,
JsonbValue *rv, void *p);
@@ -322,7 +322,7 @@ static int compareDatetime(Datum val1, Oid typid1, Datum val2, Oid typid2,
static JsonTableJoinState *JsonTableInitPlanState(JsonTableContext *cxt,
- Node *plan, JsonTableScanState *parent);
+ Node *plan, JsonTableScanState *parent);
static bool JsonTableNextRow(JsonTableScanState *scan);
@@ -2743,7 +2743,7 @@ static int
compareDatetime(Datum val1, Oid typid1, Datum val2, Oid typid2,
bool useTz, bool *cast_error)
{
- PGFunction cmpfunc;
+ PGFunction cmpfunc;
*cast_error = false;
@@ -2987,8 +2987,8 @@ JsonPathQuery(Datum jb, JsonPath *jp, JsonWrapper wrapper, bool *empty,
JsonbValue *
JsonPathValue(Datum jb, JsonPath *jp, bool *empty, bool *error, List *vars)
{
- JsonbValue *res;
- JsonValueList found = { 0 };
+ JsonbValue *res;
+ JsonValueList found = {0};
JsonPathExecResult jper PG_USED_FOR_ASSERTS_ONLY;
int count;
@@ -3123,8 +3123,8 @@ JsonItemFromDatum(Datum val, Oid typid, int32 typmod, JsonbValue *res)
text *txt = DatumGetTextP(val);
char *str = text_to_cstring(txt);
Jsonb *jb =
- DatumGetJsonbP(DirectFunctionCall1(jsonb_in,
- CStringGetDatum(str)));
+ DatumGetJsonbP(DirectFunctionCall1(jsonb_in,
+ CStringGetDatum(str)));
pfree(str);
@@ -3221,7 +3221,7 @@ JsonTableInitOpaque(TableFuncScanState *state, int natts)
{
JsonTableContext *cxt;
PlanState *ps = &state->ss.ps;
- TableFuncScan *tfs = castNode(TableFuncScan, ps->plan);
+ TableFuncScan *tfs = castNode(TableFuncScan, ps->plan);
TableFunc *tf = tfs->tablefunc;
JsonExpr *ci = castNode(JsonExpr, tf->docexpr);
JsonTableParent *root = castNode(JsonTableParent, tf->plan);
@@ -3298,7 +3298,7 @@ JsonTableResetContextItem(JsonTableScanState *scan, Datum item)
{
MemoryContext oldcxt;
JsonPathExecResult res;
- Jsonb *js = (Jsonb *) DatumGetJsonbP(item);
+ Jsonb *js = (Jsonb *) DatumGetJsonbP(item);
JsonValueListClear(&scan->found);
@@ -3307,7 +3307,7 @@ JsonTableResetContextItem(JsonTableScanState *scan, Datum item)
oldcxt = MemoryContextSwitchTo(scan->mcxt);
res = executeJsonPath(scan->path, scan->args, EvalJsonPathVar, js,
- scan->errorOnError, &scan->found, false /* FIXME */);
+ scan->errorOnError, &scan->found, false /* FIXME */ );
MemoryContextSwitchTo(oldcxt);
@@ -3369,9 +3369,9 @@ JsonTableNextJoinRow(JsonTableJoinState *state)
/* inner rows are exhausted */
if (state->u.join.cross)
- state->u.join.advanceRight = false; /* next outer row */
+ state->u.join.advanceRight = false; /* next outer row */
else
- return false; /* end of scan */
+ return false; /* end of scan */
}
while (!state->u.join.advanceRight)
@@ -3387,7 +3387,7 @@ JsonTableNextJoinRow(JsonTableJoinState *state)
JsonTableRescanRecursive(state->u.join.right);
if (!JsonTableNextJoinRow(state->u.join.right))
- continue; /* next outer row */
+ continue; /* next outer row */
state->u.join.advanceRight = true; /* next inner row */
}
@@ -3460,7 +3460,7 @@ JsonTableNextRow(JsonTableScanState *scan)
{
scan->current = PointerGetDatum(NULL);
scan->currentIsNull = true;
- return false; /* end of scan */
+ return false; /* end of scan */
}
/* set current row item */
@@ -3518,12 +3518,12 @@ JsonTableGetValue(TableFuncScanState *state, int colnum,
JsonTableScanState *scan = cxt->colexprs[colnum].scan;
Datum result;
- if (scan->currentIsNull) /* NULL from outer/union join */
+ if (scan->currentIsNull) /* NULL from outer/union join */
{
result = (Datum) 0;
*isnull = true;
}
- else if (estate) /* regular column */
+ else if (estate) /* regular column */
{
result = ExecEvalExpr(estate, econtext, isnull);
}
diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c
index 833ee8f814c..e02fc3725ad 100644
--- a/src/backend/utils/adt/like.c
+++ b/src/backend/utils/adt/like.c
@@ -181,8 +181,8 @@ Generic_Text_IC_like(text *str, text *pat, Oid collation)
if (!OidIsValid(collation))
{
/*
- * This typically means that the parser could not resolve a
- * conflict of implicit collations, so report it that way.
+ * This typically means that the parser could not resolve a conflict
+ * of implicit collations, so report it that way.
*/
ereport(ERROR,
(errcode(ERRCODE_INDETERMINATE_COLLATION),
diff --git a/src/backend/utils/adt/multirangetypes.c b/src/backend/utils/adt/multirangetypes.c
index 67d7d67fb83..da5c7d09069 100644
--- a/src/backend/utils/adt/multirangetypes.c
+++ b/src/backend/utils/adt/multirangetypes.c
@@ -1439,7 +1439,8 @@ multirange_agg_transfn(PG_FUNCTION_ARGS)
if (range_count == 0)
{
/*
- * Add an empty range so we get an empty result (not a null result).
+ * Add an empty range so we get an empty result (not a null
+ * result).
*/
accumArrayResult(state,
RangeTypePGetDatum(make_empty_range(rngtypcache)),
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index 45547f6ae7f..920a63b0081 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -8537,139 +8537,138 @@ div_var(const NumericVar *var1, const NumericVar *var2, NumericVar *result,
alloc_var(result, res_ndigits);
res_digits = result->digits;
+ /*
+ * The full multiple-place algorithm is taken from Knuth volume 2,
+ * Algorithm 4.3.1D.
+ *
+ * We need the first divisor digit to be >= NBASE/2. If it isn't, make it
+ * so by scaling up both the divisor and dividend by the factor "d". (The
+ * reason for allocating dividend[0] above is to leave room for possible
+ * carry here.)
+ */
+ if (divisor[1] < HALF_NBASE)
+ {
+ int d = NBASE / (divisor[1] + 1);
+
+ carry = 0;
+ for (i = var2ndigits; i > 0; i--)
+ {
+ carry += divisor[i] * d;
+ divisor[i] = carry % NBASE;
+ carry = carry / NBASE;
+ }
+ Assert(carry == 0);
+ carry = 0;
+ /* at this point only var1ndigits of dividend can be nonzero */
+ for (i = var1ndigits; i >= 0; i--)
+ {
+ carry += dividend[i] * d;
+ dividend[i] = carry % NBASE;
+ carry = carry / NBASE;
+ }
+ Assert(carry == 0);
+ Assert(divisor[1] >= HALF_NBASE);
+ }
+ /* First 2 divisor digits are used repeatedly in main loop */
+ divisor1 = divisor[1];
+ divisor2 = divisor[2];
+
+ /*
+ * Begin the main loop. Each iteration of this loop produces the j'th
+ * quotient digit by dividing dividend[j .. j + var2ndigits] by the
+ * divisor; this is essentially the same as the common manual procedure
+ * for long division.
+ */
+ for (j = 0; j < res_ndigits; j++)
+ {
+ /* Estimate quotient digit from the first two dividend digits */
+ int next2digits = dividend[j] * NBASE + dividend[j + 1];
+ int qhat;
+
/*
- * The full multiple-place algorithm is taken from Knuth volume 2,
- * Algorithm 4.3.1D.
- *
- * We need the first divisor digit to be >= NBASE/2. If it isn't,
- * make it so by scaling up both the divisor and dividend by the
- * factor "d". (The reason for allocating dividend[0] above is to
- * leave room for possible carry here.)
+ * If next2digits are 0, then quotient digit must be 0 and there's no
+ * need to adjust the working dividend. It's worth testing here to
+ * fall out ASAP when processing trailing zeroes in a dividend.
*/
- if (divisor[1] < HALF_NBASE)
+ if (next2digits == 0)
{
- int d = NBASE / (divisor[1] + 1);
-
- carry = 0;
- for (i = var2ndigits; i > 0; i--)
- {
- carry += divisor[i] * d;
- divisor[i] = carry % NBASE;
- carry = carry / NBASE;
- }
- Assert(carry == 0);
- carry = 0;
- /* at this point only var1ndigits of dividend can be nonzero */
- for (i = var1ndigits; i >= 0; i--)
- {
- carry += dividend[i] * d;
- dividend[i] = carry % NBASE;
- carry = carry / NBASE;
- }
- Assert(carry == 0);
- Assert(divisor[1] >= HALF_NBASE);
+ res_digits[j] = 0;
+ continue;
}
- /* First 2 divisor digits are used repeatedly in main loop */
- divisor1 = divisor[1];
- divisor2 = divisor[2];
+
+ if (dividend[j] == divisor1)
+ qhat = NBASE - 1;
+ else
+ qhat = next2digits / divisor1;
/*
- * Begin the main loop. Each iteration of this loop produces the j'th
- * quotient digit by dividing dividend[j .. j + var2ndigits] by the
- * divisor; this is essentially the same as the common manual
- * procedure for long division.
+ * Adjust quotient digit if it's too large. Knuth proves that after
+ * this step, the quotient digit will be either correct or just one
+ * too large. (Note: it's OK to use dividend[j+2] here because we
+ * know the divisor length is at least 2.)
*/
- for (j = 0; j < res_ndigits; j++)
+ while (divisor2 * qhat >
+ (next2digits - qhat * divisor1) * NBASE + dividend[j + 2])
+ qhat--;
+
+ /* As above, need do nothing more when quotient digit is 0 */
+ if (qhat > 0)
{
- /* Estimate quotient digit from the first two dividend digits */
- int next2digits = dividend[j] * NBASE + dividend[j + 1];
- int qhat;
+ NumericDigit *dividend_j = &dividend[j];
/*
- * If next2digits are 0, then quotient digit must be 0 and there's
- * no need to adjust the working dividend. It's worth testing
- * here to fall out ASAP when processing trailing zeroes in a
- * dividend.
+ * Multiply the divisor by qhat, and subtract that from the
+ * working dividend. The multiplication and subtraction are
+ * folded together here, noting that qhat <= NBASE (since it might
+ * be one too large), and so the intermediate result "tmp_result"
+ * is in the range [-NBASE^2, NBASE - 1], and "borrow" is in the
+ * range [0, NBASE].
*/
- if (next2digits == 0)
+ borrow = 0;
+ for (i = var2ndigits; i >= 0; i--)
{
- res_digits[j] = 0;
- continue;
- }
+ int tmp_result;
- if (dividend[j] == divisor1)
- qhat = NBASE - 1;
- else
- qhat = next2digits / divisor1;
+ tmp_result = dividend_j[i] - borrow - divisor[i] * qhat;
+ borrow = (NBASE - 1 - tmp_result) / NBASE;
+ dividend_j[i] = tmp_result + borrow * NBASE;
+ }
/*
- * Adjust quotient digit if it's too large. Knuth proves that
- * after this step, the quotient digit will be either correct or
- * just one too large. (Note: it's OK to use dividend[j+2] here
- * because we know the divisor length is at least 2.)
+ * If we got a borrow out of the top dividend digit, then indeed
+ * qhat was one too large. Fix it, and add back the divisor to
+ * correct the working dividend. (Knuth proves that this will
+ * occur only about 3/NBASE of the time; hence, it's a good idea
+ * to test this code with small NBASE to be sure this section gets
+ * exercised.)
*/
- while (divisor2 * qhat >
- (next2digits - qhat * divisor1) * NBASE + dividend[j + 2])
- qhat--;
-
- /* As above, need do nothing more when quotient digit is 0 */
- if (qhat > 0)
+ if (borrow)
{
- NumericDigit *dividend_j = &dividend[j];
-
- /*
- * Multiply the divisor by qhat, and subtract that from the
- * working dividend. The multiplication and subtraction are
- * folded together here, noting that qhat <= NBASE (since it
- * might be one too large), and so the intermediate result
- * "tmp_result" is in the range [-NBASE^2, NBASE - 1], and
- * "borrow" is in the range [0, NBASE].
- */
- borrow = 0;
+ qhat--;
+ carry = 0;
for (i = var2ndigits; i >= 0; i--)
{
- int tmp_result;
-
- tmp_result = dividend_j[i] - borrow - divisor[i] * qhat;
- borrow = (NBASE - 1 - tmp_result) / NBASE;
- dividend_j[i] = tmp_result + borrow * NBASE;
- }
-
- /*
- * If we got a borrow out of the top dividend digit, then
- * indeed qhat was one too large. Fix it, and add back the
- * divisor to correct the working dividend. (Knuth proves
- * that this will occur only about 3/NBASE of the time; hence,
- * it's a good idea to test this code with small NBASE to be
- * sure this section gets exercised.)
- */
- if (borrow)
- {
- qhat--;
- carry = 0;
- for (i = var2ndigits; i >= 0; i--)
+ carry += dividend_j[i] + divisor[i];
+ if (carry >= NBASE)
{
- carry += dividend_j[i] + divisor[i];
- if (carry >= NBASE)
- {
- dividend_j[i] = carry - NBASE;
- carry = 1;
- }
- else
- {
- dividend_j[i] = carry;
- carry = 0;
- }
+ dividend_j[i] = carry - NBASE;
+ carry = 1;
+ }
+ else
+ {
+ dividend_j[i] = carry;
+ carry = 0;
}
- /* A carry should occur here to cancel the borrow above */
- Assert(carry == 1);
}
+ /* A carry should occur here to cancel the borrow above */
+ Assert(carry == 1);
}
-
- /* And we're done with this quotient digit */
- res_digits[j] = qhat;
}
+ /* And we're done with this quotient digit */
+ res_digits[j] = qhat;
+ }
+
pfree(dividend);
/*
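[Editorial note, not part of the patch] The reindented comment in the div_var() hunk above refers to Knuth's Algorithm 4.3.1D. A minimal standalone sketch of the quotient-digit estimation step it describes, using a toy base of 10 instead of NBASE; all names here are illustrative and not PostgreSQL code:

/* Estimate one quotient digit as in Knuth 4.3.1D, toy base 10. */
#include <stdio.h>

#define TOY_NBASE 10

int
main(void)
{
	int			u[] = {8, 7, 6};	/* dividend window digits: 876 */
	int			v[] = {9, 7};		/* divisor digits: 97; v[0] >= TOY_NBASE/2 */
	int			next2digits = u[0] * TOY_NBASE + u[1];	/* 87 */
	int			qhat;

	/* initial estimate from first two dividend digits, first divisor digit */
	if (u[0] == v[0])
		qhat = TOY_NBASE - 1;
	else
		qhat = next2digits / v[0];	/* 87 / 9 = 9 */

	/* adjustment: after this loop qhat is correct or at most one too large */
	while (v[1] * qhat >
		   (next2digits - qhat * v[0]) * TOY_NBASE + u[2])
		qhat--;

	printf("quotient digit = %d\n", qhat);	/* prints 9, since 876/97 = 9 rem 3 */
	return 0;
}

For 876 / 97 the initial estimate 87 / 9 = 9 already survives the adjustment loop, so no correction (and no add-back of the divisor) is needed in this case.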
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 2c47dea3429..a0490a75224 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -1625,7 +1625,7 @@ pg_newlocale_from_collation(Oid collid)
}
datum = SysCacheGetAttr(COLLOID, tp, Anum_pg_collation_collversion,
- &isnull);
+ &isnull);
if (!isnull)
{
char *actual_versionstr;
@@ -1992,7 +1992,7 @@ check_icu_locale(const char *icu_locale)
{
#ifdef USE_ICU
UCollator *collator;
- UErrorCode status;
+ UErrorCode status;
status = U_ZERO_ERROR;
collator = ucol_open(icu_locale, &status);
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index d3ad795a6ea..893690dad52 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -2411,7 +2411,7 @@ pg_stat_have_stats(PG_FUNCTION_ARGS)
char *stats_type = text_to_cstring(PG_GETARG_TEXT_P(0));
Oid dboid = PG_GETARG_OID(1);
Oid objoid = PG_GETARG_OID(2);
- PgStat_Kind kind = pgstat_get_kind_from_str(stats_type);
+ PgStat_Kind kind = pgstat_get_kind_from_str(stats_type);
PG_RETURN_BOOL(pgstat_have_entry(kind, dboid, objoid));
}
diff --git a/src/backend/utils/adt/rangetypes_spgist.c b/src/backend/utils/adt/rangetypes_spgist.c
index f90b0a3b358..1190b8000bc 100644
--- a/src/backend/utils/adt/rangetypes_spgist.c
+++ b/src/backend/utils/adt/rangetypes_spgist.c
@@ -608,8 +608,8 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
/*
* Non-empty range A contains non-empty range B if lower
* bound of A is lower or equal to lower bound of range B
- * and upper bound of range A is greater than or equal to upper
- * bound of range A.
+ * and upper bound of range A is greater than or equal to
+ * upper bound of range A.
*
* All non-empty ranges contain an empty range.
*/
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 01d4c22cfce..51b3fdc9a01 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -113,8 +113,10 @@ typedef struct RI_ConstraintInfo
Oid fk_relid; /* referencing relation */
char confupdtype; /* foreign key's ON UPDATE action */
char confdeltype; /* foreign key's ON DELETE action */
- int ndelsetcols; /* number of columns referenced in ON DELETE SET clause */
- int16 confdelsetcols[RI_MAX_NUMKEYS]; /* attnums of cols to set on delete */
+ int ndelsetcols; /* number of columns referenced in ON DELETE
+ * SET clause */
+ int16 confdelsetcols[RI_MAX_NUMKEYS]; /* attnums of cols to set on
+ * delete */
char confmatchtype; /* foreign key's match type */
int nkeys; /* number of key columns */
int16 pk_attnums[RI_MAX_NUMKEYS]; /* attnums of referenced cols */
@@ -1059,7 +1061,8 @@ ri_set(TriggerData *trigdata, bool is_set_null, int tgkind)
/*
* Fetch or prepare a saved plan for the trigger.
*/
- switch (tgkind) {
+ switch (tgkind)
+ {
case RI_TRIGTYPE_UPDATE:
queryno = is_set_null
? RI_PLAN_SETNULL_ONUPDATE
@@ -1086,25 +1089,29 @@ ri_set(TriggerData *trigdata, bool is_set_null, int tgkind)
const char *qualsep;
Oid queryoids[RI_MAX_NUMKEYS];
const char *fk_only;
- int num_cols_to_set;
+ int num_cols_to_set;
const int16 *set_cols;
- switch (tgkind) {
+ switch (tgkind)
+ {
case RI_TRIGTYPE_UPDATE:
num_cols_to_set = riinfo->nkeys;
set_cols = riinfo->fk_attnums;
break;
case RI_TRIGTYPE_DELETE:
+
/*
- * If confdelsetcols are present, then we only update
- * the columns specified in that array, otherwise we
- * update all the referencing columns.
+ * If confdelsetcols are present, then we only update the
+ * columns specified in that array, otherwise we update all
+ * the referencing columns.
*/
- if (riinfo->ndelsetcols != 0) {
+ if (riinfo->ndelsetcols != 0)
+ {
num_cols_to_set = riinfo->ndelsetcols;
set_cols = riinfo->confdelsetcols;
}
- else {
+ else
+ {
num_cols_to_set = riinfo->nkeys;
set_cols = riinfo->fk_attnums;
}
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 5d49f564a2e..f22ecfc5832 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -2331,7 +2331,10 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
if (string)
appendStringInfo(&buf, " ON DELETE %s", string);
- /* Add columns specified to SET NULL or SET DEFAULT if provided. */
+ /*
+ * Add columns specified to SET NULL or SET DEFAULT if
+ * provided.
+ */
val = SysCacheGetAttr(CONSTROID, tup,
Anum_pg_constraint_confdelsetcols, &isnull);
if (!isnull)
@@ -8260,7 +8263,7 @@ isSimpleNode(Node *node, Node *parentNode, int prettyFlags)
case T_GroupingFunc: /* own parentheses */
case T_WindowFunc: /* own parentheses */
case T_CaseExpr: /* other separators */
- case T_JsonExpr: /* own parentheses */
+ case T_JsonExpr: /* own parentheses */
return true;
default:
return false;
@@ -8456,8 +8459,8 @@ get_json_format(JsonFormat *format, StringInfo buf)
if (format->encoding != JS_ENC_DEFAULT)
{
const char *encoding =
- format->encoding == JS_ENC_UTF16 ? "UTF16" :
- format->encoding == JS_ENC_UTF32 ? "UTF32" : "UTF8";
+ format->encoding == JS_ENC_UTF16 ? "UTF16" :
+ format->encoding == JS_ENC_UTF32 ? "UTF32" : "UTF8";
appendStringInfo(buf, " ENCODING %s", encoding);
}
@@ -8479,7 +8482,7 @@ get_json_returning(JsonReturning *returning, StringInfo buf,
if (!json_format_by_default ||
returning->format->format_type !=
- (returning->typid == JSONBOID ? JS_FORMAT_JSONB : JS_FORMAT_JSON))
+ (returning->typid == JSONBOID ? JS_FORMAT_JSONB : JS_FORMAT_JSON))
get_json_format(returning->format, buf);
}
@@ -9778,7 +9781,8 @@ get_rule_expr(Node *node, deparse_context *context,
if (jexpr->passing_values)
{
- ListCell *lc1, *lc2;
+ ListCell *lc1,
+ *lc2;
bool needcomma = false;
appendStringInfoString(buf, " PASSING ");
@@ -10147,7 +10151,7 @@ get_json_constructor(JsonConstructorExpr *ctor, deparse_context *context,
if (nargs > 0)
{
const char *sep = ctor->type == JSCTOR_JSON_OBJECT &&
- (nargs % 2) != 0 ? " : " : ", ";
+ (nargs % 2) != 0 ? " : " : ", ";
appendStringInfoString(buf, sep);
}
@@ -10251,7 +10255,8 @@ get_agg_expr_helper(Aggref *aggref, deparse_context *context,
if (is_json_objectagg)
{
if (i > 2)
- break; /* skip ABSENT ON NULL and WITH UNIQUE args */
+ break; /* skip ABSENT ON NULL and WITH UNIQUE
+ * args */
appendStringInfoString(buf, " : ");
}
@@ -11160,16 +11165,16 @@ get_json_table_nested_columns(TableFunc *tf, Node *node,
}
else
{
- JsonTableParent *n = castNode(JsonTableParent, node);
+ JsonTableParent *n = castNode(JsonTableParent, node);
- if (needcomma)
- appendStringInfoChar(context->buf, ',');
+ if (needcomma)
+ appendStringInfoChar(context->buf, ',');
- appendStringInfoChar(context->buf, ' ');
- appendContextKeyword(context, "NESTED PATH ", 0, 0, 0);
- get_const_expr(n->path, context, -1);
- appendStringInfo(context->buf, " AS %s", quote_identifier(n->name));
- get_json_table_columns(tf, n, context, showimplicit);
+ appendStringInfoChar(context->buf, ' ');
+ appendContextKeyword(context, "NESTED PATH ", 0, 0, 0);
+ get_const_expr(n->path, context, -1);
+ appendStringInfo(context->buf, " AS %s", quote_identifier(n->name));
+ get_json_table_columns(tf, n, context, showimplicit);
}
}
@@ -11199,17 +11204,17 @@ get_json_table_plan(TableFunc *tf, Node *node, deparse_context *context,
}
else
{
- JsonTableParent *n = castNode(JsonTableParent, node);
+ JsonTableParent *n = castNode(JsonTableParent, node);
- appendStringInfoString(context->buf, quote_identifier(n->name));
+ appendStringInfoString(context->buf, quote_identifier(n->name));
- if (n->child)
- {
+ if (n->child)
+ {
appendStringInfoString(context->buf,
n->outerJoin ? " OUTER " : " INNER ");
get_json_table_plan(tf, n->child, context,
IsA(n->child, JsonTableSibling));
- }
+ }
}
if (parenthesize)
@@ -11348,7 +11353,8 @@ get_json_table(TableFunc *tf, deparse_context *context, bool showimplicit)
if (jexpr->passing_values)
{
- ListCell *lc1, *lc2;
+ ListCell *lc1,
+ *lc2;
bool needcomma = false;
appendStringInfoChar(buf, ' ');
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 71cbc1c3d80..fa1f589fad8 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -3380,9 +3380,9 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
*/
double
estimate_num_groups_incremental(PlannerInfo *root, List *groupExprs,
- double input_rows,
- List **pgset, EstimationInfo *estinfo,
- List **cache_varinfos, int prevNExprs)
+ double input_rows,
+ List **pgset, EstimationInfo *estinfo,
+ List **cache_varinfos, int prevNExprs)
{
List *varinfos = (cache_varinfos) ? *cache_varinfos : NIL;
double srf_multiplier = 1.0;
@@ -3433,7 +3433,7 @@ estimate_num_groups_incremental(PlannerInfo *root, List *groupExprs,
if (cache_varinfos && j++ < prevNExprs)
{
if (pgset)
- i++; /* to keep in sync with lines below */
+ i++; /* to keep in sync with lines below */
continue;
}
@@ -3944,7 +3944,7 @@ estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel,
Oid statOid = InvalidOid;
MVNDistinct *stats;
StatisticExtInfo *matched_info = NULL;
- RangeTblEntry *rte;
+ RangeTblEntry *rte;
/* bail out immediately if the table has no extended statistics */
if (!rel->statlist)
@@ -5255,7 +5255,7 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
foreach(slist, onerel->statlist)
{
StatisticExtInfo *info = (StatisticExtInfo *) lfirst(slist);
- RangeTblEntry *rte = planner_rt_fetch(onerel->relid, root);
+ RangeTblEntry *rte = planner_rt_fetch(onerel->relid, root);
ListCell *expr_item;
int pos;
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index 8acb725bc8f..f70f829d830 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -2194,6 +2194,7 @@ timestamp_sortsupport(PG_FUNCTION_ARGS)
SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
#if SIZEOF_DATUM >= 8
+
/*
* If this build has pass-by-value timestamps, then we can use a standard
* comparator function.
@@ -4349,59 +4350,59 @@ interval_trunc(PG_FUNCTION_ARGS)
if (type == UNITS)
{
interval2itm(*interval, tm);
- switch (val)
- {
- case DTK_MILLENNIUM:
- /* caution: C division may have negative remainder */
- tm->tm_year = (tm->tm_year / 1000) * 1000;
- /* FALL THRU */
- case DTK_CENTURY:
- /* caution: C division may have negative remainder */
- tm->tm_year = (tm->tm_year / 100) * 100;
- /* FALL THRU */
- case DTK_DECADE:
- /* caution: C division may have negative remainder */
- tm->tm_year = (tm->tm_year / 10) * 10;
- /* FALL THRU */
- case DTK_YEAR:
- tm->tm_mon = 0;
- /* FALL THRU */
- case DTK_QUARTER:
- tm->tm_mon = 3 * (tm->tm_mon / 3);
- /* FALL THRU */
- case DTK_MONTH:
- tm->tm_mday = 0;
- /* FALL THRU */
- case DTK_DAY:
- tm->tm_hour = 0;
- /* FALL THRU */
- case DTK_HOUR:
- tm->tm_min = 0;
- /* FALL THRU */
- case DTK_MINUTE:
- tm->tm_sec = 0;
- /* FALL THRU */
- case DTK_SECOND:
- tm->tm_usec = 0;
- break;
- case DTK_MILLISEC:
- tm->tm_usec = (tm->tm_usec / 1000) * 1000;
- break;
- case DTK_MICROSEC:
- break;
-
- default:
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("unit \"%s\" not supported for type %s",
- lowunits, format_type_be(INTERVALOID)),
- (val == DTK_WEEK) ? errdetail("Months usually have fractional weeks.") : 0));
- }
+ switch (val)
+ {
+ case DTK_MILLENNIUM:
+ /* caution: C division may have negative remainder */
+ tm->tm_year = (tm->tm_year / 1000) * 1000;
+ /* FALL THRU */
+ case DTK_CENTURY:
+ /* caution: C division may have negative remainder */
+ tm->tm_year = (tm->tm_year / 100) * 100;
+ /* FALL THRU */
+ case DTK_DECADE:
+ /* caution: C division may have negative remainder */
+ tm->tm_year = (tm->tm_year / 10) * 10;
+ /* FALL THRU */
+ case DTK_YEAR:
+ tm->tm_mon = 0;
+ /* FALL THRU */
+ case DTK_QUARTER:
+ tm->tm_mon = 3 * (tm->tm_mon / 3);
+ /* FALL THRU */
+ case DTK_MONTH:
+ tm->tm_mday = 0;
+ /* FALL THRU */
+ case DTK_DAY:
+ tm->tm_hour = 0;
+ /* FALL THRU */
+ case DTK_HOUR:
+ tm->tm_min = 0;
+ /* FALL THRU */
+ case DTK_MINUTE:
+ tm->tm_sec = 0;
+ /* FALL THRU */
+ case DTK_SECOND:
+ tm->tm_usec = 0;
+ break;
+ case DTK_MILLISEC:
+ tm->tm_usec = (tm->tm_usec / 1000) * 1000;
+ break;
+ case DTK_MICROSEC:
+ break;
- if (itm2interval(tm, result) != 0)
+ default:
ereport(ERROR,
- (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("interval out of range")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("unit \"%s\" not supported for type %s",
+ lowunits, format_type_be(INTERVALOID)),
+ (val == DTK_WEEK) ? errdetail("Months usually have fractional weeks.") : 0));
+ }
+
+ if (itm2interval(tm, result) != 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
+ errmsg("interval out of range")));
}
else
{
@@ -5225,80 +5226,80 @@ interval_part_common(PG_FUNCTION_ARGS, bool retnumeric)
if (type == UNITS)
{
interval2itm(*interval, tm);
- switch (val)
- {
- case DTK_MICROSEC:
- intresult = tm->tm_sec * INT64CONST(1000000) + tm->tm_usec;
- break;
+ switch (val)
+ {
+ case DTK_MICROSEC:
+ intresult = tm->tm_sec * INT64CONST(1000000) + tm->tm_usec;
+ break;
- case DTK_MILLISEC:
- if (retnumeric)
- /*---
- * tm->tm_sec * 1000 + fsec / 1000
- * = (tm->tm_sec * 1'000'000 + fsec) / 1000
- */
- PG_RETURN_NUMERIC(int64_div_fast_to_numeric(tm->tm_sec * INT64CONST(1000000) + tm->tm_usec, 3));
- else
- PG_RETURN_FLOAT8(tm->tm_sec * 1000.0 + tm->tm_usec / 1000.0);
- break;
+ case DTK_MILLISEC:
+ if (retnumeric)
+ /*---
+ * tm->tm_sec * 1000 + fsec / 1000
+ * = (tm->tm_sec * 1'000'000 + fsec) / 1000
+ */
+ PG_RETURN_NUMERIC(int64_div_fast_to_numeric(tm->tm_sec * INT64CONST(1000000) + tm->tm_usec, 3));
+ else
+ PG_RETURN_FLOAT8(tm->tm_sec * 1000.0 + tm->tm_usec / 1000.0);
+ break;
- case DTK_SECOND:
- if (retnumeric)
- /*---
- * tm->tm_sec + fsec / 1'000'000
- * = (tm->tm_sec * 1'000'000 + fsec) / 1'000'000
- */
- PG_RETURN_NUMERIC(int64_div_fast_to_numeric(tm->tm_sec * INT64CONST(1000000) + tm->tm_usec, 6));
- else
- PG_RETURN_FLOAT8(tm->tm_sec + tm->tm_usec / 1000000.0);
- break;
+ case DTK_SECOND:
+ if (retnumeric)
+ /*---
+ * tm->tm_sec + fsec / 1'000'000
+ * = (tm->tm_sec * 1'000'000 + fsec) / 1'000'000
+ */
+ PG_RETURN_NUMERIC(int64_div_fast_to_numeric(tm->tm_sec * INT64CONST(1000000) + tm->tm_usec, 6));
+ else
+ PG_RETURN_FLOAT8(tm->tm_sec + tm->tm_usec / 1000000.0);
+ break;
- case DTK_MINUTE:
- intresult = tm->tm_min;
- break;
+ case DTK_MINUTE:
+ intresult = tm->tm_min;
+ break;
- case DTK_HOUR:
- intresult = tm->tm_hour;
- break;
+ case DTK_HOUR:
+ intresult = tm->tm_hour;
+ break;
- case DTK_DAY:
- intresult = tm->tm_mday;
- break;
+ case DTK_DAY:
+ intresult = tm->tm_mday;
+ break;
- case DTK_MONTH:
- intresult = tm->tm_mon;
- break;
+ case DTK_MONTH:
+ intresult = tm->tm_mon;
+ break;
- case DTK_QUARTER:
- intresult = (tm->tm_mon / 3) + 1;
- break;
+ case DTK_QUARTER:
+ intresult = (tm->tm_mon / 3) + 1;
+ break;
- case DTK_YEAR:
- intresult = tm->tm_year;
- break;
+ case DTK_YEAR:
+ intresult = tm->tm_year;
+ break;
- case DTK_DECADE:
- /* caution: C division may have negative remainder */
- intresult = tm->tm_year / 10;
- break;
+ case DTK_DECADE:
+ /* caution: C division may have negative remainder */
+ intresult = tm->tm_year / 10;
+ break;
- case DTK_CENTURY:
- /* caution: C division may have negative remainder */
- intresult = tm->tm_year / 100;
- break;
+ case DTK_CENTURY:
+ /* caution: C division may have negative remainder */
+ intresult = tm->tm_year / 100;
+ break;
- case DTK_MILLENNIUM:
- /* caution: C division may have negative remainder */
- intresult = tm->tm_year / 1000;
- break;
+ case DTK_MILLENNIUM:
+ /* caution: C division may have negative remainder */
+ intresult = tm->tm_year / 1000;
+ break;
- default:
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("unit \"%s\" not supported for type %s",
- lowunits, format_type_be(INTERVALOID))));
- intresult = 0;
- }
+ default:
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("unit \"%s\" not supported for type %s",
+ lowunits, format_type_be(INTERVALOID))));
+ intresult = 0;
+ }
}
else if (type == RESERV && val == DTK_EPOCH)
{
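[Editorial note, not part of the patch] The /*--- comments in the interval_part_common() hunk above rely on a simple rearrangement: under integer division with a non-negative microsecond part, sec * 1000 + usec / 1000 equals (sec * 1000000 + usec) / 1000, because sec * 1000000 is always divisible by 1000. A quick standalone check:

#include <stdio.h>

int
main(void)
{
	long long	sec = 5,
				usec = 250000;

	printf("%lld\n", sec * 1000 + usec / 1000);			/* 5250 */
	printf("%lld\n", (sec * 1000000LL + usec) / 1000);	/* 5250 */
	return 0;
}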
diff --git a/src/backend/utils/adt/uuid.c b/src/backend/utils/adt/uuid.c
index a157f864e12..7cec9372485 100644
--- a/src/backend/utils/adt/uuid.c
+++ b/src/backend/utils/adt/uuid.c
@@ -377,8 +377,8 @@ uuid_abbrev_convert(Datum original, SortSupport ssup)
*
* This is needed so that ssup_datum_unsigned_cmp() (an unsigned integer
* 3-way comparator) works correctly on all platforms. If we didn't do
- * this, the comparator would have to call memcmp() with a pair of pointers
- * to the first byte of each abbreviated key, which is slower.
+ * this, the comparator would have to call memcmp() with a pair of
+ * pointers to the first byte of each abbreviated key, which is slower.
*/
res = DatumBigEndianToNative(res);
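[Editorial note, not part of the patch] The rewrapped comment above concerns abbreviated sort keys: they must compare as unsigned integers in the same order that memcmp() would compare the underlying bytes, which is why the packed datum is converted to native byte order first. A small illustration of the invariant, with load_bytes_as_key() as a hypothetical stand-in for that conversion:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* interpret the first 8 bytes as a big-endian number, so that numeric
 * comparison of the result agrees with memcmp() on the original bytes */
static uint64_t
load_bytes_as_key(const unsigned char *p)
{
	uint64_t	k = 0;

	for (int i = 0; i < 8; i++)
		k = (k << 8) | p[i];
	return k;
}

int
main(void)
{
	unsigned char a[8] = {0x01, 0xFF, 0, 0, 0, 0, 0, 0};
	unsigned char b[8] = {0x02, 0x00, 0, 0, 0, 0, 0, 0};

	/* memcmp says a < b; the integer comparison of the keys must agree */
	printf("memcmp: %d\n", memcmp(a, b, 8) < 0);						/* 1 */
	printf("keys:   %d\n", load_bytes_as_key(a) < load_bytes_as_key(b));	/* 1 */
	return 0;
}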
diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c
index 8b5b30ed714..bbeb0a2653a 100644
--- a/src/backend/utils/adt/varchar.c
+++ b/src/backend/utils/adt/varchar.c
@@ -744,7 +744,7 @@ bpchareq(PG_FUNCTION_ARGS)
bool result;
Oid collid = PG_GET_COLLATION();
bool locale_is_c = false;
- pg_locale_t mylocale = 0;
+ pg_locale_t mylocale = 0;
check_collation_set(collid);
@@ -789,7 +789,7 @@ bpcharne(PG_FUNCTION_ARGS)
bool result;
Oid collid = PG_GET_COLLATION();
bool locale_is_c = false;
- pg_locale_t mylocale = 0;
+ pg_locale_t mylocale = 0;
check_collation_set(collid);
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index cfc135c7beb..919138eaf32 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -1758,7 +1758,7 @@ texteq(PG_FUNCTION_ARGS)
{
Oid collid = PG_GET_COLLATION();
bool locale_is_c = false;
- pg_locale_t mylocale = 0;
+ pg_locale_t mylocale = 0;
bool result;
check_collation_set(collid);
@@ -1817,7 +1817,7 @@ textne(PG_FUNCTION_ARGS)
{
Oid collid = PG_GET_COLLATION();
bool locale_is_c = false;
- pg_locale_t mylocale = 0;
+ pg_locale_t mylocale = 0;
bool result;
check_collation_set(collid);
@@ -2674,8 +2674,8 @@ done:
*
* This is needed so that ssup_datum_unsigned_cmp() (an unsigned integer
* 3-way comparator) works correctly on all platforms. If we didn't do
- * this, the comparator would have to call memcmp() with a pair of pointers
- * to the first byte of each abbreviated key, which is slower.
+ * this, the comparator would have to call memcmp() with a pair of
+ * pointers to the first byte of each abbreviated key, which is slower.
*/
res = DatumBigEndianToNative(res);
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 4cf6db504ff..0d6a2956748 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -689,10 +689,10 @@ RevalidateCachedQuery(CachedPlanSource *plansource,
queryEnv);
else
tlist = pg_analyze_and_rewrite_fixedparams(rawtree,
- plansource->query_string,
- plansource->param_types,
- plansource->num_params,
- queryEnv);
+ plansource->query_string,
+ plansource->param_types,
+ plansource->num_params,
+ queryEnv);
/* Release snapshot if we got one */
if (snapshot_set)
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 43f14c233d6..60e72f9e8bf 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -5107,7 +5107,7 @@ RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
Bitmapset *uindexattrs; /* columns in unique indexes */
Bitmapset *pkindexattrs; /* columns in the primary index */
Bitmapset *idindexattrs; /* columns in the replica identity */
- Bitmapset *hotblockingattrs; /* columns with HOT blocking indexes */
+ Bitmapset *hotblockingattrs; /* columns with HOT blocking indexes */
List *indexoidlist;
List *newindexoidlist;
Oid relpkindex;
@@ -5237,7 +5237,7 @@ restart:
{
if (indexDesc->rd_indam->amhotblocking)
hotblockingattrs = bms_add_member(hotblockingattrs,
- attrnum - FirstLowInvalidHeapAttributeNumber);
+ attrnum - FirstLowInvalidHeapAttributeNumber);
if (isKey && i < indexDesc->rd_index->indnkeyatts)
uindexattrs = bms_add_member(uindexattrs,
@@ -5258,9 +5258,9 @@ restart:
pull_varattnos(indexExpressions, 1, &hotblockingattrs);
/*
- * Collect all attributes in the index predicate, too. We have to ignore
- * amhotblocking flag, because the row might become indexable, in which
- * case we have to add it to the index.
+ * Collect all attributes in the index predicate, too. We have to
+ * ignore amhotblocking flag, because the row might become indexable,
+ * in which case we have to add it to the index.
*/
pull_varattnos(indexPredicate, 1, &hotblockingattrs);
@@ -5308,9 +5308,8 @@ restart:
/*
* Now save copies of the bitmaps in the relcache entry. We intentionally
* set rd_attrsvalid last, because that's what signals validity of the
- * values; if we run out of memory before making that copy, we won't
- * leave the relcache entry looking like the other ones are valid but
- * empty.
+ * values; if we run out of memory before making that copy, we won't leave
+ * the relcache entry looking like the other ones are valid but empty.
*/
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
relation->rd_keyattr = bms_copy(uindexattrs);
@@ -5636,8 +5635,8 @@ RelationBuildPublicationDesc(Relation relation, PublicationDesc *pubdesc)
pubdesc->pubactions.pubtruncate |= pubform->pubtruncate;
/*
- * Check if all columns referenced in the filter expression are part of
- * the REPLICA IDENTITY index or not.
+ * Check if all columns referenced in the filter expression are part
+ * of the REPLICA IDENTITY index or not.
*
* If the publication is FOR ALL TABLES then it means the table has no
* row filters and we can skip the validation.
@@ -5645,7 +5644,7 @@ RelationBuildPublicationDesc(Relation relation, PublicationDesc *pubdesc)
if (!pubform->puballtables &&
(pubform->pubupdate || pubform->pubdelete) &&
pub_rf_contains_invalid_column(pubid, relation, ancestors,
- pubform->pubviaroot))
+ pubform->pubviaroot))
{
if (pubform->pubupdate)
pubdesc->rf_valid_for_update = false;
@@ -5662,7 +5661,7 @@ RelationBuildPublicationDesc(Relation relation, PublicationDesc *pubdesc)
if (!pubform->puballtables &&
(pubform->pubupdate || pubform->pubdelete) &&
pub_collist_contains_invalid_column(pubid, relation, ancestors,
- pubform->pubviaroot))
+ pubform->pubviaroot))
{
if (pubform->pubupdate)
pubdesc->cols_valid_for_update = false;
diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c
index 75a3aedc5af..2a330cf3ba4 100644
--- a/src/backend/utils/cache/relmapper.c
+++ b/src/backend/utils/cache/relmapper.c
@@ -287,7 +287,7 @@ RelationMapOidToFilenodeForDatabase(char *dbpath, Oid relationId)
void
RelationMapCopy(Oid dbid, Oid tsid, char *srcdbpath, char *dstdbpath)
{
- RelMapFile map;
+ RelMapFile map;
/*
* Read the relmap file from the source database.
@@ -302,8 +302,8 @@ RelationMapCopy(Oid dbid, Oid tsid, char *srcdbpath, char *dstdbpath)
* RelationMappingLock.
*
* There's no point in trying to preserve files here. The new database
- * isn't usable yet anyway, and won't ever be if we can't install a
- * relmap file.
+ * isn't usable yet anyway, and won't ever be if we can't install a relmap
+ * file.
*/
write_relmap_file(&map, true, false, false, dbid, tsid, dstdbpath);
}
@@ -1089,11 +1089,11 @@ relmap_redo(XLogReaderState *record)
* There shouldn't be anyone else updating relmaps during WAL replay,
* but grab the lock to interlock against load_relmap_file().
*
- * Note that we use the same WAL record for updating the relmap of
- * an existing database as we do for creating a new database. In
- * the latter case, taking the relmap log and sending sinval messages
- * is unnecessary, but harmless. If we wanted to avoid it, we could
- * add a flag to the WAL record to indicate which operation is being
+ * Note that we use the same WAL record for updating the relmap of an
+ * existing database as we do for creating a new database. In the
+ * latter case, taking the relmap log and sending sinval messages is
+ * unnecessary, but harmless. If we wanted to avoid it, we could add a
+ * flag to the WAL record to indicate which operation is being
* performed.
*/
LWLockAcquire(RelationMappingLock, LW_EXCLUSIVE);
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index d297ba08295..fa701daa26f 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -429,6 +429,7 @@ CheckMyDatabase(const char *name, bool am_superuser, bool override_allow_connect
iculocale = NULL;
default_locale.provider = dbform->datlocprovider;
+
/*
* Default locale is currently always deterministic. Nondeterministic
* locales currently don't support pattern matching, which would break a
@@ -604,8 +605,8 @@ BaseInit(void)
InitTemporaryFileAccess();
/*
- * Initialize local buffers for WAL record construction, in case we
- * ever try to insert XLOG.
+ * Initialize local buffers for WAL record construction, in case we ever
+ * try to insert XLOG.
*/
InitXLogInsert();
@@ -693,10 +694,10 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
}
/*
- * If this is either a bootstrap process or a standalone backend, start
- * up the XLOG machinery, and register to have it closed down at exit.
- * In other cases, the startup process is responsible for starting up
- * the XLOG machinery, and the checkpointer for closing it down.
+ * If this is either a bootstrap process or a standalone backend, start up
+ * the XLOG machinery, and register to have it closed down at exit. In
+ * other cases, the startup process is responsible for starting up the
+ * XLOG machinery, and the checkpointer for closing it down.
*/
if (!IsUnderPostmaster)
{
@@ -1241,7 +1242,8 @@ ShutdownPostgres(int code, Datum arg)
*/
#ifdef USE_ASSERT_CHECKING
{
- int held_lwlocks = LWLockHeldCount();
+ int held_lwlocks = LWLockHeldCount();
+
if (held_lwlocks)
elog(WARNING, "holding %d lwlocks at the end of ShutdownPostgres()",
held_lwlocks);
diff --git a/src/backend/utils/misc/queryjumble.c b/src/backend/utils/misc/queryjumble.c
index 2ffa014618f..d35027275f1 100644
--- a/src/backend/utils/misc/queryjumble.c
+++ b/src/backend/utils/misc/queryjumble.c
@@ -787,7 +787,7 @@ JumbleExpr(JumbleState *jstate, Node *node)
break;
case T_JsonExpr:
{
- JsonExpr *jexpr = (JsonExpr *) node;
+ JsonExpr *jexpr = (JsonExpr *) node;
APP_JUMB(jexpr->op);
JumbleExpr(jstate, jexpr->formatted_expr);
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index a4c3b736678..8340a660526 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -469,7 +469,7 @@ struct Tuplesortstate
/* These are specific to the index_btree subcase: */
bool enforceUnique; /* complain if we find duplicate tuples */
- bool uniqueNullsNotDistinct; /* unique constraint null treatment */
+ bool uniqueNullsNotDistinct; /* unique constraint null treatment */
/* These are specific to the index_hash subcase: */
uint32 high_mask; /* masks for sortable part of hash code */
@@ -706,8 +706,8 @@ qsort_tuple_unsigned_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state)
return compare;
/*
- * No need to waste effort calling the tiebreak function when there are
- * no other keys to sort on.
+ * No need to waste effort calling the tiebreak function when there are no
+ * other keys to sort on.
*/
if (state->onlyKey != NULL)
return 0;
@@ -730,8 +730,8 @@ qsort_tuple_signed_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state)
return compare;
/*
- * No need to waste effort calling the tiebreak function when there are
- * no other keys to sort on.
+ * No need to waste effort calling the tiebreak function when there are no
+ * other keys to sort on.
*/
if (state->onlyKey != NULL)
return 0;
@@ -747,15 +747,15 @@ qsort_tuple_int32_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state)
int compare;
compare = ApplyInt32SortComparator(a->datum1, a->isnull1,
- b->datum1, b->isnull1,
- &state->sortKeys[0]);
+ b->datum1, b->isnull1,
+ &state->sortKeys[0]);
if (compare != 0)
return compare;
/*
- * No need to waste effort calling the tiebreak function when there are
- * no other keys to sort on.
+ * No need to waste effort calling the tiebreak function when there are no
+ * other keys to sort on.
*/
if (state->onlyKey != NULL)
return 0;
diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c
index fcef651c2fc..ed6de7ca941 100644
--- a/src/bin/initdb/initdb.c
+++ b/src/bin/initdb/initdb.c
@@ -1808,8 +1808,8 @@ make_template0(FILE *cmdfd)
* the new cluster should be the result of a fresh initdb.)
*
* We use "STRATEGY = file_copy" here because checkpoints during initdb
- * are cheap. "STRATEGY = wal_log" would generate more WAL, which would
- * be a little bit slower and make the new cluster a little bit bigger.
+ * are cheap. "STRATEGY = wal_log" would generate more WAL, which would be
+ * a little bit slower and make the new cluster a little bit bigger.
*/
static const char *const template0_setup[] = {
"CREATE DATABASE template0 IS_TEMPLATE = true ALLOW_CONNECTIONS = false"
diff --git a/src/bin/initdb/t/001_initdb.pl b/src/bin/initdb/t/001_initdb.pl
index a3397777cf2..a37f6dd9b33 100644
--- a/src/bin/initdb/t/001_initdb.pl
+++ b/src/bin/initdb/t/001_initdb.pl
@@ -97,27 +97,45 @@ SKIP:
if ($ENV{with_icu} eq 'yes')
{
- command_fails_like(['initdb', '--no-sync', '--locale-provider=icu', "$tempdir/data2"],
+ command_fails_like(
+ [ 'initdb', '--no-sync', '--locale-provider=icu', "$tempdir/data2" ],
qr/initdb: error: ICU locale must be specified/,
'locale provider ICU requires --icu-locale');
- command_ok(['initdb', '--no-sync', '--locale-provider=icu', '--icu-locale=en', "$tempdir/data3"],
+ command_ok(
+ [
+ 'initdb', '--no-sync',
+ '--locale-provider=icu', '--icu-locale=en',
+ "$tempdir/data3"
+ ],
'option --icu-locale');
- command_fails_like(['initdb', '--no-sync', '--locale-provider=icu', '--icu-locale=@colNumeric=lower', "$tempdir/dataX"],
+ command_fails_like(
+ [
+ 'initdb', '--no-sync',
+ '--locale-provider=icu', '--icu-locale=@colNumeric=lower',
+ "$tempdir/dataX"
+ ],
qr/FATAL: could not open collator for locale/,
'fails for invalid ICU locale');
}
else
{
- command_fails(['initdb', '--no-sync', '--locale-provider=icu', "$tempdir/data2"],
- 'locale provider ICU fails since no ICU support');
+ command_fails(
+ [ 'initdb', '--no-sync', '--locale-provider=icu', "$tempdir/data2" ],
+ 'locale provider ICU fails since no ICU support');
}
-command_fails(['initdb', '--no-sync', '--locale-provider=xyz', "$tempdir/dataX"],
- 'fails for invalid locale provider');
+command_fails(
+ [ 'initdb', '--no-sync', '--locale-provider=xyz', "$tempdir/dataX" ],
+ 'fails for invalid locale provider');
-command_fails(['initdb', '--no-sync', '--locale-provider=libc', '--icu-locale=en', "$tempdir/dataX"],
- 'fails for invalid option combination');
+command_fails(
+ [
+ 'initdb', '--no-sync',
+ '--locale-provider=libc', '--icu-locale=en',
+ "$tempdir/dataX"
+ ],
+ 'fails for invalid option combination');
done_testing();
diff --git a/src/bin/pg_amcheck/pg_amcheck.c b/src/bin/pg_amcheck/pg_amcheck.c
index 48cee8c1c4e..f0b818e987a 100644
--- a/src/bin/pg_amcheck/pg_amcheck.c
+++ b/src/bin/pg_amcheck/pg_amcheck.c
@@ -1074,17 +1074,17 @@ verify_btree_slot_handler(PGresult *res, PGconn *conn, void *context)
if (PQresultStatus(res) == PGRES_TUPLES_OK)
{
- int ntups = PQntuples(res);
+ int ntups = PQntuples(res);
if (ntups > 1)
{
/*
* We expect the btree checking functions to return one void row
* each, or zero rows if the check was skipped due to the object
- * being in the wrong state to be checked, so we should output some
- * sort of warning if we get anything more, not because it
- * indicates corruption, but because it suggests a mismatch between
- * amcheck and pg_amcheck versions.
+ * being in the wrong state to be checked, so we should output
+ * some sort of warning if we get anything more, not because it
+ * indicates corruption, but because it suggests a mismatch
+ * between amcheck and pg_amcheck versions.
*
* In conjunction with --progress, anything written to stderr at
* this time would present strangely to the user without an extra
diff --git a/src/bin/pg_amcheck/t/002_nonesuch.pl b/src/bin/pg_amcheck/t/002_nonesuch.pl
index 6c0f97027dd..0c07016aa0c 100644
--- a/src/bin/pg_amcheck/t/002_nonesuch.pl
+++ b/src/bin/pg_amcheck/t/002_nonesuch.pl
@@ -155,8 +155,7 @@ $node->command_checks_all(
[
qr/pg_amcheck: error: improper qualified name \(too many dotted names\): localhost\.postgres/
],
- 'multipart database patterns are rejected'
-);
+ 'multipart database patterns are rejected');
# Check that a three-part schema name is rejected
$node->command_checks_all(
@@ -166,8 +165,7 @@ $node->command_checks_all(
[
qr/pg_amcheck: error: improper qualified name \(too many dotted names\): localhost\.postgres\.pg_catalog/
],
- 'three part schema patterns are rejected'
-);
+ 'three part schema patterns are rejected');
# Check that a four-part table name is rejected
$node->command_checks_all(
@@ -177,39 +175,44 @@ $node->command_checks_all(
[
qr/pg_amcheck: error: improper relation name \(too many dotted names\): localhost\.postgres\.pg_catalog\.pg_class/
],
- 'four part table patterns are rejected'
-);
+ 'four part table patterns are rejected');
# Check that too many dotted names still draws an error under --no-strict-names
# That flag means that it is ok for the object to be missing, not that it is ok
# for the object name to be ungrammatical
$node->command_checks_all(
- [ 'pg_amcheck', '--no-strict-names', '-t', 'this.is.a.really.long.dotted.string' ],
+ [
+ 'pg_amcheck', '--no-strict-names',
+ '-t', 'this.is.a.really.long.dotted.string'
+ ],
2,
[qr/^$/],
[
qr/pg_amcheck: error: improper relation name \(too many dotted names\): this\.is\.a\.really\.long\.dotted\.string/
],
- 'ungrammatical table names still draw errors under --no-strict-names'
-);
+ 'ungrammatical table names still draw errors under --no-strict-names');
$node->command_checks_all(
- [ 'pg_amcheck', '--no-strict-names', '-s', 'postgres.long.dotted.string' ],
+ [
+ 'pg_amcheck', '--no-strict-names', '-s',
+ 'postgres.long.dotted.string'
+ ],
2,
[qr/^$/],
[
qr/pg_amcheck: error: improper qualified name \(too many dotted names\): postgres\.long\.dotted\.string/
],
- 'ungrammatical schema names still draw errors under --no-strict-names'
-);
+ 'ungrammatical schema names still draw errors under --no-strict-names');
$node->command_checks_all(
- [ 'pg_amcheck', '--no-strict-names', '-d', 'postgres.long.dotted.string' ],
+ [
+ 'pg_amcheck', '--no-strict-names', '-d',
+ 'postgres.long.dotted.string'
+ ],
2,
[qr/^$/],
[
qr/pg_amcheck: error: improper qualified name \(too many dotted names\): postgres\.long\.dotted\.string/
],
- 'ungrammatical database names still draw errors under --no-strict-names'
-);
+ 'ungrammatical database names still draw errors under --no-strict-names');
# Likewise for exclusion patterns
$node->command_checks_all(
@@ -262,7 +265,7 @@ $node->command_checks_all(
'-r', 'postgres.none.none',
'-r', 'postgres.pg_catalog.none',
'-r', 'postgres.none.pg_class',
- '-t', 'postgres.pg_catalog.pg_class', # This exists
+ '-t', 'postgres.pg_catalog.pg_class', # This exists
],
0,
[qr/^$/],
diff --git a/src/bin/pg_amcheck/t/005_opclass_damage.pl b/src/bin/pg_amcheck/t/005_opclass_damage.pl
index a5e82082700..ce376f239cf 100644
--- a/src/bin/pg_amcheck/t/005_opclass_damage.pl
+++ b/src/bin/pg_amcheck/t/005_opclass_damage.pl
@@ -33,8 +33,7 @@ $node->safe_psql(
));
# We have not yet broken the index, so we should get no corruption
-$node->command_like(
- [ 'pg_amcheck', '-p', $node->port, 'postgres' ],
+$node->command_like([ 'pg_amcheck', '-p', $node->port, 'postgres' ],
qr/^$/,
'pg_amcheck all schemas, tables and indexes reports no corruption');
diff --git a/src/bin/pg_basebackup/bbstreamer_file.c b/src/bin/pg_basebackup/bbstreamer_file.c
index 393e9f340ce..1a94fb2796c 100644
--- a/src/bin/pg_basebackup/bbstreamer_file.c
+++ b/src/bin/pg_basebackup/bbstreamer_file.c
@@ -34,7 +34,7 @@ typedef struct bbstreamer_extractor
void (*report_output_file) (const char *);
char filename[MAXPGPATH];
FILE *file;
-} bbstreamer_extractor;
+} bbstreamer_extractor;
static void bbstreamer_plain_writer_content(bbstreamer *streamer,
bbstreamer_member *member,
@@ -356,7 +356,7 @@ static void
bbstreamer_extractor_finalize(bbstreamer *streamer)
{
bbstreamer_extractor *mystreamer PG_USED_FOR_ASSERTS_ONLY
- = (bbstreamer_extractor *) streamer;
+ = (bbstreamer_extractor *) streamer;
Assert(mystreamer->file == NULL);
}
diff --git a/src/bin/pg_basebackup/bbstreamer_gzip.c b/src/bin/pg_basebackup/bbstreamer_gzip.c
index b3bfcd62ac3..e7261910d81 100644
--- a/src/bin/pg_basebackup/bbstreamer_gzip.c
+++ b/src/bin/pg_basebackup/bbstreamer_gzip.c
@@ -28,7 +28,7 @@ typedef struct bbstreamer_gzip_writer
bbstreamer base;
char *pathname;
gzFile gzfile;
-} bbstreamer_gzip_writer;
+} bbstreamer_gzip_writer;
typedef struct bbstreamer_gzip_decompressor
{
@@ -52,9 +52,9 @@ const bbstreamer_ops bbstreamer_gzip_writer_ops = {
};
static void bbstreamer_gzip_decompressor_content(bbstreamer *streamer,
- bbstreamer_member *member,
- const char *data, int len,
- bbstreamer_archive_context context);
+ bbstreamer_member *member,
+ const char *data, int len,
+ bbstreamer_archive_context context);
static void bbstreamer_gzip_decompressor_finalize(bbstreamer *streamer);
static void bbstreamer_gzip_decompressor_free(bbstreamer *streamer);
static void *gzip_palloc(void *opaque, unsigned items, unsigned size);
@@ -214,8 +214,8 @@ bbstreamer *
bbstreamer_gzip_decompressor_new(bbstreamer *next)
{
#ifdef HAVE_LIBZ
- bbstreamer_gzip_decompressor *streamer;
- z_stream *zs;
+ bbstreamer_gzip_decompressor *streamer;
+ z_stream *zs;
Assert(next != NULL);
@@ -261,12 +261,12 @@ bbstreamer_gzip_decompressor_new(bbstreamer *next)
*/
static void
bbstreamer_gzip_decompressor_content(bbstreamer *streamer,
- bbstreamer_member *member,
- const char *data, int len,
- bbstreamer_archive_context context)
+ bbstreamer_member *member,
+ const char *data, int len,
+ bbstreamer_archive_context context)
{
bbstreamer_gzip_decompressor *mystreamer;
- z_stream *zs;
+ z_stream *zs;
mystreamer = (bbstreamer_gzip_decompressor *) streamer;
@@ -277,7 +277,7 @@ bbstreamer_gzip_decompressor_content(bbstreamer *streamer,
/* Process the current chunk */
while (zs->avail_in > 0)
{
- int res;
+ int res;
Assert(mystreamer->bytes_written < mystreamer->base.bbs_buffer.maxlen);
@@ -288,8 +288,9 @@ bbstreamer_gzip_decompressor_content(bbstreamer *streamer,
/*
* This call decompresses data starting at zs->next_in and updates
- * zs->next_in * and zs->avail_in. It generates output data starting at
- * zs->next_out and updates zs->next_out and zs->avail_out accordingly.
+ * zs->next_in * and zs->avail_in. It generates output data starting
+ * at zs->next_out and updates zs->next_out and zs->avail_out
+ * accordingly.
*/
res = inflate(zs, Z_NO_FLUSH);
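[Editorial note, not part of the patch] The rewrapped comment above describes the standard zlib streaming contract: inflate() consumes input from next_in/avail_in and writes output to next_out/avail_out, updating all four fields as it goes. A self-contained sketch of that loop, assuming only the public zlib API (build with -lz):

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int
main(void)
{
	const char	msg[] = "hello, hello, hello, hello";
	unsigned char compressed[128];
	unsigned char decompressed[128];
	uLongf		clen = sizeof(compressed);
	z_stream	zs;

	/* one-shot compression, just to have something to inflate */
	if (compress(compressed, &clen, (const Bytef *) msg, sizeof(msg)) != Z_OK)
		return 1;

	memset(&zs, 0, sizeof(zs));
	if (inflateInit(&zs) != Z_OK)
		return 1;

	zs.next_in = compressed;
	zs.avail_in = (uInt) clen;
	zs.next_out = decompressed;
	zs.avail_out = sizeof(decompressed);

	/* each call advances next_in/next_out and shrinks avail_in/avail_out */
	while (zs.avail_in > 0)
	{
		int			res = inflate(&zs, Z_NO_FLUSH);

		if (res == Z_STREAM_END)
			break;
		if (res != Z_OK)
			return 1;
	}
	inflateEnd(&zs);

	printf("%s\n", decompressed);	/* prints the original message */
	return 0;
}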
diff --git a/src/bin/pg_basebackup/bbstreamer_lz4.c b/src/bin/pg_basebackup/bbstreamer_lz4.c
index 6070a72cdb5..b9752354c91 100644
--- a/src/bin/pg_basebackup/bbstreamer_lz4.c
+++ b/src/bin/pg_basebackup/bbstreamer_lz4.c
@@ -27,9 +27,9 @@ typedef struct bbstreamer_lz4_frame
{
bbstreamer base;
- LZ4F_compressionContext_t cctx;
- LZ4F_decompressionContext_t dctx;
- LZ4F_preferences_t prefs;
+ LZ4F_compressionContext_t cctx;
+ LZ4F_decompressionContext_t dctx;
+ LZ4F_preferences_t prefs;
size_t bytes_written;
bool header_written;
@@ -70,9 +70,9 @@ bbstreamer *
bbstreamer_lz4_compressor_new(bbstreamer *next, pg_compress_specification *compress)
{
#ifdef USE_LZ4
- bbstreamer_lz4_frame *streamer;
- LZ4F_errorCode_t ctxError;
- LZ4F_preferences_t *prefs;
+ bbstreamer_lz4_frame *streamer;
+ LZ4F_errorCode_t ctxError;
+ LZ4F_preferences_t *prefs;
Assert(next != NULL);
@@ -119,12 +119,12 @@ bbstreamer_lz4_compressor_content(bbstreamer *streamer,
const char *data, int len,
bbstreamer_archive_context context)
{
- bbstreamer_lz4_frame *mystreamer;
- uint8 *next_in,
- *next_out;
- size_t out_bound,
- compressed_size,
- avail_out;
+ bbstreamer_lz4_frame *mystreamer;
+ uint8 *next_in,
+ *next_out;
+ size_t out_bound,
+ compressed_size,
+ avail_out;
mystreamer = (bbstreamer_lz4_frame *) streamer;
next_in = (uint8 *) data;
@@ -146,8 +146,8 @@ bbstreamer_lz4_compressor_content(bbstreamer *streamer,
}
/*
- * Update the offset and capacity of output buffer based on number of bytes
- * written to output buffer.
+ * Update the offset and capacity of output buffer based on number of
+ * bytes written to output buffer.
*/
next_out = (uint8 *) mystreamer->base.bbs_buffer.data + mystreamer->bytes_written;
avail_out = mystreamer->base.bbs_buffer.maxlen - mystreamer->bytes_written;
@@ -160,18 +160,18 @@ bbstreamer_lz4_compressor_content(bbstreamer *streamer,
out_bound = LZ4F_compressBound(len, &mystreamer->prefs);
if (avail_out < out_bound)
{
- bbstreamer_content(mystreamer->base.bbs_next, member,
- mystreamer->base.bbs_buffer.data,
- mystreamer->bytes_written,
- context);
-
- /* Enlarge buffer if it falls short of out bound. */
- if (mystreamer->base.bbs_buffer.maxlen < out_bound)
- enlargeStringInfo(&mystreamer->base.bbs_buffer, out_bound);
-
- avail_out = mystreamer->base.bbs_buffer.maxlen;
- mystreamer->bytes_written = 0;
- next_out = (uint8 *) mystreamer->base.bbs_buffer.data;
+ bbstreamer_content(mystreamer->base.bbs_next, member,
+ mystreamer->base.bbs_buffer.data,
+ mystreamer->bytes_written,
+ context);
+
+ /* Enlarge buffer if it falls short of out bound. */
+ if (mystreamer->base.bbs_buffer.maxlen < out_bound)
+ enlargeStringInfo(&mystreamer->base.bbs_buffer, out_bound);
+
+ avail_out = mystreamer->base.bbs_buffer.maxlen;
+ mystreamer->bytes_written = 0;
+ next_out = (uint8 *) mystreamer->base.bbs_buffer.data;
}
/*
@@ -199,11 +199,11 @@ bbstreamer_lz4_compressor_content(bbstreamer *streamer,
static void
bbstreamer_lz4_compressor_finalize(bbstreamer *streamer)
{
- bbstreamer_lz4_frame *mystreamer;
- uint8 *next_out;
- size_t footer_bound,
- compressed_size,
- avail_out;
+ bbstreamer_lz4_frame *mystreamer;
+ uint8 *next_out;
+ size_t footer_bound,
+ compressed_size,
+ avail_out;
mystreamer = (bbstreamer_lz4_frame *) streamer;
@@ -212,18 +212,18 @@ bbstreamer_lz4_compressor_finalize(bbstreamer *streamer)
if ((mystreamer->base.bbs_buffer.maxlen - mystreamer->bytes_written) <
footer_bound)
{
- bbstreamer_content(mystreamer->base.bbs_next, NULL,
- mystreamer->base.bbs_buffer.data,
- mystreamer->bytes_written,
- BBSTREAMER_UNKNOWN);
-
- /* Enlarge buffer if it falls short of footer bound. */
- if (mystreamer->base.bbs_buffer.maxlen < footer_bound)
- enlargeStringInfo(&mystreamer->base.bbs_buffer, footer_bound);
-
- avail_out = mystreamer->base.bbs_buffer.maxlen;
- mystreamer->bytes_written = 0;
- next_out = (uint8 *) mystreamer->base.bbs_buffer.data;
+ bbstreamer_content(mystreamer->base.bbs_next, NULL,
+ mystreamer->base.bbs_buffer.data,
+ mystreamer->bytes_written,
+ BBSTREAMER_UNKNOWN);
+
+ /* Enlarge buffer if it falls short of footer bound. */
+ if (mystreamer->base.bbs_buffer.maxlen < footer_bound)
+ enlargeStringInfo(&mystreamer->base.bbs_buffer, footer_bound);
+
+ avail_out = mystreamer->base.bbs_buffer.maxlen;
+ mystreamer->bytes_written = 0;
+ next_out = (uint8 *) mystreamer->base.bbs_buffer.data;
}
else
{
@@ -258,7 +258,7 @@ bbstreamer_lz4_compressor_finalize(bbstreamer *streamer)
static void
bbstreamer_lz4_compressor_free(bbstreamer *streamer)
{
- bbstreamer_lz4_frame *mystreamer;
+ bbstreamer_lz4_frame *mystreamer;
mystreamer = (bbstreamer_lz4_frame *) streamer;
bbstreamer_free(streamer->bbs_next);
@@ -276,8 +276,8 @@ bbstreamer *
bbstreamer_lz4_decompressor_new(bbstreamer *next)
{
#ifdef USE_LZ4
- bbstreamer_lz4_frame *streamer;
- LZ4F_errorCode_t ctxError;
+ bbstreamer_lz4_frame *streamer;
+ LZ4F_errorCode_t ctxError;
Assert(next != NULL);
@@ -313,11 +313,11 @@ bbstreamer_lz4_decompressor_content(bbstreamer *streamer,
const char *data, int len,
bbstreamer_archive_context context)
{
- bbstreamer_lz4_frame *mystreamer;
- uint8 *next_in,
- *next_out;
- size_t avail_in,
- avail_out;
+ bbstreamer_lz4_frame *mystreamer;
+ uint8 *next_in,
+ *next_out;
+ size_t avail_in,
+ avail_out;
mystreamer = (bbstreamer_lz4_frame *) streamer;
next_in = (uint8 *) data;
@@ -327,9 +327,9 @@ bbstreamer_lz4_decompressor_content(bbstreamer *streamer,
while (avail_in > 0)
{
- size_t ret,
- read_size,
- out_size;
+ size_t ret,
+ read_size,
+ out_size;
read_size = avail_in;
out_size = avail_out;
@@ -362,8 +362,8 @@ bbstreamer_lz4_decompressor_content(bbstreamer *streamer,
mystreamer->bytes_written += out_size;
/*
- * If output buffer is full then forward the content to next streamer and
- * update the output buffer.
+ * If output buffer is full then forward the content to next streamer
+ * and update the output buffer.
*/
if (mystreamer->bytes_written >= mystreamer->base.bbs_buffer.maxlen)
{
@@ -390,7 +390,7 @@ bbstreamer_lz4_decompressor_content(bbstreamer *streamer,
static void
bbstreamer_lz4_decompressor_finalize(bbstreamer *streamer)
{
- bbstreamer_lz4_frame *mystreamer;
+ bbstreamer_lz4_frame *mystreamer;
mystreamer = (bbstreamer_lz4_frame *) streamer;
@@ -412,7 +412,7 @@ bbstreamer_lz4_decompressor_finalize(bbstreamer *streamer)
static void
bbstreamer_lz4_decompressor_free(bbstreamer *streamer)
{
- bbstreamer_lz4_frame *mystreamer;
+ bbstreamer_lz4_frame *mystreamer;
mystreamer = (bbstreamer_lz4_frame *) streamer;
bbstreamer_free(streamer->bbs_next);
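The buffer management that the reindented hunks above keep repeating follows one pattern: before compressing a chunk, compare LZ4F_compressBound() against the space left in the output buffer, flush whatever is already buffered if the worst-case frame does not fit, and enlarge the buffer if even an empty one would be too small. A condensed sketch of that pattern follows; flush_downstream() and grow_buffer() are hypothetical stand-ins for bbstreamer_content() and enlargeStringInfo(), so this is not the patched code itself.

#include <lz4frame.h>

extern void flush_downstream(const char *data, size_t len);
extern void grow_buffer(char **buf, size_t *maxlen, size_t need);

/* Illustrative only: compress one chunk, making room in *buf first. */
static size_t
compress_chunk(LZ4F_cctx *cctx, LZ4F_preferences_t *prefs,
               char **buf, size_t *maxlen, size_t *written,
               const char *data, size_t len)
{
    size_t out_bound = LZ4F_compressBound(len, prefs);

    /* Ensure a worst-case frame fits behind what is already buffered;
     * otherwise flush downstream and, if needed, enlarge the buffer. */
    if (*maxlen - *written < out_bound)
    {
        flush_downstream(*buf, *written);
        if (*maxlen < out_bound)
            grow_buffer(buf, maxlen, out_bound);
        *written = 0;
    }

    size_t n = LZ4F_compressUpdate(cctx, *buf + *written, *maxlen - *written,
                                   data, len, NULL);

    if (!LZ4F_isError(n))
        *written += n;
    return n;
}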
diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c
index 6be04544763..4adb170d464 100644
--- a/src/bin/pg_basebackup/pg_basebackup.c
+++ b/src/bin/pg_basebackup/pg_basebackup.c
@@ -58,7 +58,7 @@ typedef struct TablespaceList
typedef struct ArchiveStreamState
{
int tablespacenum;
- pg_compress_specification *compress;
+ pg_compress_specification *compress;
bbstreamer *streamer;
bbstreamer *manifest_inject_streamer;
PQExpBuffer manifest_buffer;
@@ -173,6 +173,7 @@ static int bgpipe[2] = {-1, -1};
/* Handle to child process */
static pid_t bgchild = -1;
static bool in_log_streamer = false;
+
/* Flag to indicate if child process exited unexpectedly */
static volatile sig_atomic_t bgchild_exited = false;
@@ -567,8 +568,8 @@ LogStreamerMain(logstreamer_param *param)
*/
#ifdef WIN32
/*
- * In order to signal the main thread of an ungraceful exit we
- * set the same flag that we use on Unix to signal SIGCHLD.
+ * In order to signal the main thread of an ungraceful exit we set the
+ * same flag that we use on Unix to signal SIGCHLD.
*/
bgchild_exited = true;
#endif
@@ -1010,7 +1011,7 @@ parse_compress_options(char *option, char **algorithm, char **detail,
}
else
{
- char *alg;
+ char *alg;
alg = palloc((sep - option) + 1);
memcpy(alg, option, sep - option);
@@ -1133,11 +1134,11 @@ CreateBackupStreamer(char *archive_name, char *spclocation,
/*
* We have to parse the archive if (1) we're suppose to extract it, or if
- * (2) we need to inject backup_manifest or recovery configuration into it.
- * However, we only know how to parse tar archives.
+ * (2) we need to inject backup_manifest or recovery configuration into
+ * it. However, we only know how to parse tar archives.
*/
must_parse_archive = (format == 'p' || inject_manifest ||
- (spclocation == NULL && writerecoveryconf));
+ (spclocation == NULL && writerecoveryconf));
/* At present, we only know how to parse tar archives. */
if (must_parse_archive && !is_tar && !is_compressed_tar)
@@ -1178,8 +1179,8 @@ CreateBackupStreamer(char *archive_name, char *spclocation,
/*
* In tar format, we just write the archive without extracting it.
* Normally, we write it to the archive name provided by the caller,
- * but when the base directory is "-" that means we need to write
- * to standard output.
+ * but when the base directory is "-" that means we need to write to
+ * standard output.
*/
if (strcmp(basedir, "-") == 0)
{
@@ -1233,16 +1234,16 @@ CreateBackupStreamer(char *archive_name, char *spclocation,
}
/*
- * If we're supposed to inject the backup manifest into the results,
- * it should be done here, so that the file content can be injected
- * directly, without worrying about the details of the tar format.
+ * If we're supposed to inject the backup manifest into the results, it
+ * should be done here, so that the file content can be injected directly,
+ * without worrying about the details of the tar format.
*/
if (inject_manifest)
manifest_inject_streamer = streamer;
/*
- * If this is the main tablespace and we're supposed to write
- * recovery information, arrange to do that.
+ * If this is the main tablespace and we're supposed to write recovery
+ * information, arrange to do that.
*/
if (spclocation == NULL && writerecoveryconf)
{
@@ -1253,11 +1254,10 @@ CreateBackupStreamer(char *archive_name, char *spclocation,
}
/*
- * If we're doing anything that involves understanding the contents of
- * the archive, we'll need to parse it. If not, we can skip parsing it,
- * but old versions of the server send improperly terminated tarfiles,
- * so if we're talking to such a server we'll need to add the terminator
- * here.
+ * If we're doing anything that involves understanding the contents of the
+ * archive, we'll need to parse it. If not, we can skip parsing it, but
+ * old versions of the server send improperly terminated tarfiles, so if
+ * we're talking to such a server we'll need to add the terminator here.
*/
if (must_parse_archive)
streamer = bbstreamer_tar_parser_new(streamer);
@@ -1265,8 +1265,8 @@ CreateBackupStreamer(char *archive_name, char *spclocation,
streamer = bbstreamer_tar_terminator_new(streamer);
/*
- * If the user has requested a server compressed archive along with archive
- * extraction at client then we need to decompress it.
+ * If the user has requested a server compressed archive along with
+ * archive extraction at client then we need to decompress it.
*/
if (format == 'p')
{
@@ -1848,17 +1848,17 @@ BaseBackup(char *compression_algorithm, char *compression_detail,
}
if (maxrate > 0)
AppendIntegerCommandOption(&buf, use_new_option_syntax, "MAX_RATE",
- maxrate);
+ maxrate);
if (format == 't')
AppendPlainCommandOption(&buf, use_new_option_syntax, "TABLESPACE_MAP");
if (!verify_checksums)
{
if (use_new_option_syntax)
AppendIntegerCommandOption(&buf, use_new_option_syntax,
- "VERIFY_CHECKSUMS", 0);
+ "VERIFY_CHECKSUMS", 0);
else
AppendPlainCommandOption(&buf, use_new_option_syntax,
- "NOVERIFY_CHECKSUMS");
+ "NOVERIFY_CHECKSUMS");
}
if (manifest)
@@ -1992,8 +1992,8 @@ BaseBackup(char *compression_algorithm, char *compression_detail,
* we do anything anyway.
*
* Note that this is skipped for tar format backups and backups that
- * the server is storing to a target location, since in that case
- * we won't be storing anything into these directories and thus should
+ * the server is storing to a target location, since in that case we
+ * won't be storing anything into these directories and thus should
* not create them.
*/
if (backup_target == NULL && format == 'p' && !PQgetisnull(res, i, 1))
@@ -2019,8 +2019,8 @@ BaseBackup(char *compression_algorithm, char *compression_detail,
*/
if (includewal == STREAM_WAL)
{
- pg_compress_algorithm wal_compress_algorithm;
- int wal_compress_level;
+ pg_compress_algorithm wal_compress_algorithm;
+ int wal_compress_level;
if (verbose)
pg_log_info("starting background WAL receiver");
@@ -2315,8 +2315,8 @@ main(int argc, char **argv)
int option_index;
char *compression_algorithm = "none";
char *compression_detail = NULL;
- CompressionLocation compressloc = COMPRESS_LOCATION_UNSPECIFIED;
- pg_compress_specification client_compress;
+ CompressionLocation compressloc = COMPRESS_LOCATION_UNSPECIFIED;
+ pg_compress_specification client_compress;
pg_logging_init(argv[0]);
progname = get_progname(argv[0]);
@@ -2539,8 +2539,8 @@ main(int argc, char **argv)
/*
* If the user has not specified where to perform backup compression,
- * default to the client, unless the user specified --target, in which case
- * the server is the only choice.
+ * default to the client, unless the user specified --target, in which
+ * case the server is the only choice.
*/
if (compressloc == COMPRESS_LOCATION_UNSPECIFIED)
{
@@ -2551,14 +2551,14 @@ main(int argc, char **argv)
}
/*
- * If any compression that we're doing is happening on the client side,
- * we must try to parse the compression algorithm and detail, but if it's
- * all on the server side, then we're just going to pass through whatever
- * was requested and let the server decide what to do.
+ * If any compression that we're doing is happening on the client side, we
+ * must try to parse the compression algorithm and detail, but if it's all
+ * on the server side, then we're just going to pass through whatever was
+ * requested and let the server decide what to do.
*/
if (compressloc == COMPRESS_LOCATION_CLIENT)
{
- pg_compress_algorithm alg;
+ pg_compress_algorithm alg;
char *error_detail;
if (!parse_compress_algorithm(compression_algorithm, &alg))
@@ -2579,8 +2579,8 @@ main(int argc, char **argv)
}
/*
- * Can't perform client-side compression if the backup is not being
- * sent to the client.
+ * Can't perform client-side compression if the backup is not being sent
+ * to the client.
*/
if (backup_target != NULL && compressloc == COMPRESS_LOCATION_CLIENT)
{
@@ -2724,13 +2724,14 @@ main(int argc, char **argv)
atexit(disconnect_atexit);
#ifndef WIN32
+
/*
* Trap SIGCHLD to be able to handle the WAL stream process exiting. There
- * is no SIGCHLD on Windows, there we rely on the background thread setting
- * the signal variable on unexpected but graceful exit. If the WAL stream
- * thread crashes on Windows it will bring down the entire process as it's
- * a thread, so there is nothing to catch should that happen. A crash on
- * UNIX will be caught by the signal handler.
+ * is no SIGCHLD on Windows, there we rely on the background thread
+ * setting the signal variable on unexpected but graceful exit. If the WAL
+ * stream thread crashes on Windows it will bring down the entire process
+ * as it's a thread, so there is nothing to catch should that happen. A
+ * crash on UNIX will be caught by the signal handler.
*/
pqsignal(SIGCHLD, sigchld_handler);
#endif
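The pg_basebackup hunks above mostly reflow comments around compression handling: a --compress value names an optional location prefix, an algorithm, and an optional detail, and client-side values are parsed locally while server-side values are passed through to the server. As a self-contained illustration of that splitting (illustrative only, not the actual parse_compress_options()):

#include <stdio.h>
#include <string.h>

/* Split an option such as "server-gzip:9" into location, algorithm, detail. */
static void
split_compress_option(const char *opt)
{
    const char *rest = opt;
    const char *loc = "unspecified";

    if (strncmp(rest, "server-", 7) == 0)
    {
        loc = "server";
        rest += 7;
    }
    else if (strncmp(rest, "client-", 7) == 0)
    {
        loc = "client";
        rest += 7;
    }

    const char *sep = strchr(rest, ':');

    if (sep == NULL)
        printf("location=%s algorithm=%s detail=(none)\n", loc, rest);
    else
        printf("location=%s algorithm=%.*s detail=%s\n",
               loc, (int) (sep - rest), rest, sep + 1);
}

int
main(void)
{
    split_compress_option("gzip");
    split_compress_option("server-zstd:workers=3");
    split_compress_option("client-lz4:5");
    return 0;
}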
diff --git a/src/bin/pg_basebackup/streamutil.c b/src/bin/pg_basebackup/streamutil.c
index 86c0493a949..299b9b76213 100644
--- a/src/bin/pg_basebackup/streamutil.c
+++ b/src/bin/pg_basebackup/streamutil.c
@@ -619,7 +619,7 @@ CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin,
/* pg_recvlogical doesn't use an exported snapshot, so suppress */
if (use_new_option_syntax)
AppendStringCommandOption(query, use_new_option_syntax,
- "SNAPSHOT", "nothing");
+ "SNAPSHOT", "nothing");
else
AppendPlainCommandOption(query, use_new_option_syntax,
"NOEXPORT_SNAPSHOT");
diff --git a/src/bin/pg_basebackup/t/010_pg_basebackup.pl b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
index 056fcf35976..87a211315f0 100644
--- a/src/bin/pg_basebackup/t/010_pg_basebackup.pl
+++ b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
@@ -28,8 +28,9 @@ my @pg_basebackup_defs = ('pg_basebackup', '--no-sync', '-cfast');
umask(0077);
# Initialize node without replication settings
-$node->init(extra => ['--data-checksums'],
- auth_extra => [ '--create-role', 'backupuser' ]);
+$node->init(
+ extra => ['--data-checksums'],
+ auth_extra => [ '--create-role', 'backupuser' ]);
$node->start;
my $pgdata = $node->data_dir;
@@ -85,10 +86,9 @@ $node->restart;
# Now that we have a server that supports replication commands, test whether
# certain invalid compression commands fail on the client side with client-side
# compression and on the server side with server-side compression.
-my $client_fails =
- 'pg_basebackup: error: ';
+my $client_fails = 'pg_basebackup: error: ';
my $server_fails =
- 'pg_basebackup: error: could not initiate base backup: ERROR: ';
+ 'pg_basebackup: error: could not initiate base backup: ERROR: ';
my @compression_failure_tests = (
[
'extrasquishy',
@@ -134,8 +134,7 @@ my @compression_failure_tests = (
'gzip:workers=3',
'invalid compression specification: compression algorithm "gzip" does not accept a worker count',
'failure on worker count for gzip'
- ],
-);
+ ],);
for my $cft (@compression_failure_tests)
{
my $cfail = quotemeta($client_fails . $cft->[1]);
@@ -143,10 +142,13 @@ for my $cft (@compression_failure_tests)
$node->command_fails_like(
[ 'pg_basebackup', '-D', "$tempdir/backup", '--compress', $cft->[0] ],
qr/$cfail/,
- 'client '. $cft->[2]);
+ 'client ' . $cft->[2]);
$node->command_fails_like(
- [ 'pg_basebackup', '-D', "$tempdir/backup", '--compress',
- 'server-' . $cft->[0] ],
+ [
+ 'pg_basebackup', '-D',
+ "$tempdir/backup", '--compress',
+ 'server-' . $cft->[0]
+ ],
qr/$sfail/,
'server ' . $cft->[2]);
}
@@ -189,7 +191,8 @@ foreach my $filename (@tempRelationFiles)
}
# Run base backup.
-$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/backup", '-X', 'none' ],
+$node->command_ok(
+ [ @pg_basebackup_defs, '-D', "$tempdir/backup", '-X', 'none' ],
'pg_basebackup runs');
ok(-f "$tempdir/backup/PG_VERSION", 'backup was created');
ok(-f "$tempdir/backup/backup_manifest", 'backup manifest included');
@@ -326,12 +329,12 @@ $node->start;
# to our physical temp location. That way we can use shorter names
# for the tablespace directories, which hopefully won't run afoul of
# the 99 character length limit.
-my $sys_tempdir = PostgreSQL::Test::Utils::tempdir_short;
+my $sys_tempdir = PostgreSQL::Test::Utils::tempdir_short;
my $real_sys_tempdir = "$sys_tempdir/tempdir";
dir_symlink "$tempdir", $real_sys_tempdir;
mkdir "$tempdir/tblspc1";
-my $realTsDir = "$real_sys_tempdir/tblspc1";
+my $realTsDir = "$real_sys_tempdir/tblspc1";
$node->safe_psql('postgres',
"CREATE TABLESPACE tblspc1 LOCATION '$realTsDir';");
$node->safe_psql('postgres',
@@ -368,7 +371,8 @@ SKIP:
my $repTsDir = "$tempdir/tblspc1replica";
my $realRepTsDir = "$real_sys_tempdir/tblspc1replica";
mkdir $repTsDir;
- PostgreSQL::Test::Utils::system_or_bail($tar, 'xf', $tblspc_tars[0], '-C', $repTsDir);
+ PostgreSQL::Test::Utils::system_or_bail($tar, 'xf', $tblspc_tars[0],
+ '-C', $repTsDir);
# Update tablespace map to point to new directory.
# XXX Ideally pg_basebackup would handle this.
@@ -503,7 +507,8 @@ mkdir "$tempdir/$superlongname";
$realTsDir = "$real_sys_tempdir/$superlongname";
$node->safe_psql('postgres',
"CREATE TABLESPACE tblspc3 LOCATION '$realTsDir';");
-$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/tarbackup_l3", '-Ft' ],
+$node->command_ok(
+ [ @pg_basebackup_defs, '-D', "$tempdir/tarbackup_l3", '-Ft' ],
'pg_basebackup tar with long symlink target');
$node->safe_psql('postgres', "DROP TABLESPACE tblspc3;");
rmtree("$tempdir/tarbackup_l3");
@@ -541,7 +546,10 @@ ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxs/pg_wal")),
'WAL files copied');
rmtree("$tempdir/backupxs");
$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxst", '-X', 'stream', '-Ft' ],
+ [
+ @pg_basebackup_defs, '-D', "$tempdir/backupxst", '-X', 'stream',
+ '-Ft'
+ ],
'pg_basebackup -X stream runs in tar mode');
ok(-f "$tempdir/backupxst/pg_wal.tar", "tar file was created");
rmtree("$tempdir/backupxst");
@@ -570,7 +578,10 @@ $node->command_fails_like(
qr/unrecognized target/,
'backup target unrecognized');
$node->command_fails_like(
- [ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'none', '-D', "$tempdir/blackhole" ],
+ [
+ @pg_basebackup_defs, '--target', 'blackhole', '-X',
+ 'none', '-D', "$tempdir/blackhole"
+ ],
qr/cannot specify both output directory and backup target/,
'backup target and output directory');
$node->command_fails_like(
@@ -581,7 +592,11 @@ $node->command_ok(
[ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'none' ],
'backup target blackhole');
$node->command_ok(
- [ @pg_basebackup_defs, '--target', "server:$tempdir/backuponserver", '-X', 'none' ],
+ [
+ @pg_basebackup_defs, '--target',
+ "server:$tempdir/backuponserver", '-X',
+ 'none'
+ ],
'backup target server');
ok(-f "$tempdir/backuponserver/base.tar", 'backup tar was created');
rmtree("$tempdir/backuponserver");
@@ -590,9 +605,14 @@ $node->command_ok(
[qw(createuser --replication --role=pg_write_server_files backupuser)],
'create backup user');
$node->command_ok(
- [ @pg_basebackup_defs, '-U', 'backupuser', '--target', "server:$tempdir/backuponserver", '-X', 'none' ],
+ [
+ @pg_basebackup_defs, '-U', 'backupuser', '--target',
+ "server:$tempdir/backuponserver",
+ '-X', 'none'
+ ],
'backup target server');
-ok(-f "$tempdir/backuponserver/base.tar", 'backup tar was created as non-superuser');
+ok( -f "$tempdir/backuponserver/base.tar",
+ 'backup tar was created as non-superuser');
rmtree("$tempdir/backuponserver");
$node->command_fails(
@@ -617,7 +637,10 @@ $node->command_fails(
],
'pg_basebackup fails with -C -S --no-slot');
$node->command_fails_like(
- [ @pg_basebackup_defs, '--target', 'blackhole', '-D', "$tempdir/blackhole" ],
+ [
+ @pg_basebackup_defs, '--target', 'blackhole', '-D',
+ "$tempdir/blackhole"
+ ],
qr/cannot specify both output directory and backup target/,
'backup target and output directory');
@@ -648,7 +671,11 @@ $node->command_fails(
'pg_basebackup fails with -C -S --no-slot');
$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot", '-C', '-S', 'slot0' ],
+ [
+ @pg_basebackup_defs, '-D',
+ "$tempdir/backupxs_slot", '-C',
+ '-S', 'slot0'
+ ],
'pg_basebackup -C runs');
rmtree("$tempdir/backupxs_slot");
@@ -667,7 +694,11 @@ isnt(
'restart LSN of new slot is not null');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot1", '-C', '-S', 'slot0' ],
+ [
+ @pg_basebackup_defs, '-D',
+ "$tempdir/backupxs_slot1", '-C',
+ '-S', 'slot0'
+ ],
'pg_basebackup fails with -C -S and a previously existing slot');
$node->safe_psql('postgres',
@@ -677,7 +708,10 @@ my $lsn = $node->safe_psql('postgres',
);
is($lsn, '', 'restart LSN of new slot is null');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/fail", '-S', 'slot1', '-X', 'none' ],
+ [
+ @pg_basebackup_defs, '-D', "$tempdir/fail", '-S',
+ 'slot1', '-X', 'none'
+ ],
'pg_basebackup with replication slot fails without WAL streaming');
$node->command_ok(
[
@@ -843,8 +877,10 @@ my $sigchld_bb_timeout =
my ($sigchld_bb_stdin, $sigchld_bb_stdout, $sigchld_bb_stderr) = ('', '', '');
my $sigchld_bb = IPC::Run::start(
[
- @pg_basebackup_defs, '--wal-method=stream', '-D', "$tempdir/sigchld",
- '--max-rate=32', '-d', $node->connstr('postgres')
+ @pg_basebackup_defs, '--wal-method=stream',
+ '-D', "$tempdir/sigchld",
+ '--max-rate=32', '-d',
+ $node->connstr('postgres')
],
'<',
\$sigchld_bb_stdin,
@@ -854,16 +890,18 @@ my $sigchld_bb = IPC::Run::start(
\$sigchld_bb_stderr,
$sigchld_bb_timeout);
-is($node->poll_query_until('postgres',
- "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE " .
- "application_name = '010_pg_basebackup.pl' AND wait_event = 'WalSenderMain' " .
- "AND backend_type = 'walsender' AND query ~ 'START_REPLICATION'"),
+is( $node->poll_query_until(
+ 'postgres',
+ "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE "
+ . "application_name = '010_pg_basebackup.pl' AND wait_event = 'WalSenderMain' "
+ . "AND backend_type = 'walsender' AND query ~ 'START_REPLICATION'"),
"1",
"Walsender killed");
-ok(pump_until($sigchld_bb, $sigchld_bb_timeout, \$sigchld_bb_stderr,
- qr/background process terminated unexpectedly/),
- 'background process exit message');
+ok( pump_until(
+ $sigchld_bb, $sigchld_bb_timeout,
+ \$sigchld_bb_stderr, qr/background process terminated unexpectedly/),
+ 'background process exit message');
$sigchld_bb->finish();
done_testing();
diff --git a/src/bin/pg_basebackup/t/020_pg_receivewal.pl b/src/bin/pg_basebackup/t/020_pg_receivewal.pl
index 465394404fd..4f07bb89078 100644
--- a/src/bin/pg_basebackup/t/020_pg_receivewal.pl
+++ b/src/bin/pg_basebackup/t/020_pg_receivewal.pl
@@ -45,7 +45,7 @@ $primary->command_ok(
'creating a replication slot');
my $slot = $primary->slot($slot_name);
is($slot->{'slot_type'}, 'physical', 'physical replication slot was created');
-is($slot->{'restart_lsn'}, '', 'restart LSN of new slot is null');
+is($slot->{'restart_lsn'}, '', 'restart LSN of new slot is null');
$primary->command_ok([ 'pg_receivewal', '--slot', $slot_name, '--drop-slot' ],
'dropping a replication slot');
is($primary->slot($slot_name)->{'slot_type'},
@@ -281,7 +281,7 @@ $standby->psql(
$primary->wait_for_catchup($standby);
# Get a walfilename from before the promotion to make sure it is archived
# after promotion
-my $standby_slot = $standby->slot($archive_slot);
+my $standby_slot = $standby->slot($archive_slot);
my $replication_slot_lsn = $standby_slot->{'restart_lsn'};
# pg_walfile_name() is not supported while in recovery, so use the primary
diff --git a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
index 201196f9573..38576c2e008 100644
--- a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
+++ b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
@@ -78,7 +78,8 @@ $node->command_ok(
[
'pg_recvlogical', '-S',
'test', '-d',
- $node->connstr('postgres'), '--create-slot', '--two-phase'
+ $node->connstr('postgres'), '--create-slot',
+ '--two-phase'
],
'slot with two-phase created');
@@ -87,16 +88,18 @@ isnt($slot->{'restart_lsn'}, '', 'restart lsn is defined for new slot');
$node->safe_psql('postgres',
"BEGIN; INSERT INTO test_table values (11); PREPARE TRANSACTION 'test'");
-$node->safe_psql('postgres',
- "COMMIT PREPARED 'test'");
-$nextlsn =
- $node->safe_psql('postgres', 'SELECT pg_current_wal_insert_lsn()');
+$node->safe_psql('postgres', "COMMIT PREPARED 'test'");
+$nextlsn = $node->safe_psql('postgres', 'SELECT pg_current_wal_insert_lsn()');
chomp($nextlsn);
$node->command_fails(
[
- 'pg_recvlogical', '-S', 'test', '-d', $node->connstr('postgres'),
- '--start', '--endpos', "$nextlsn", '--two-phase', '--no-loop', '-f', '-'
+ 'pg_recvlogical', '-S',
+ 'test', '-d',
+ $node->connstr('postgres'), '--start',
+ '--endpos', "$nextlsn",
+ '--two-phase', '--no-loop',
+ '-f', '-'
],
'incorrect usage');
diff --git a/src/bin/pg_ctl/pg_ctl.c b/src/bin/pg_ctl/pg_ctl.c
index f605e02da88..dd78e5bc660 100644
--- a/src/bin/pg_ctl/pg_ctl.c
+++ b/src/bin/pg_ctl/pg_ctl.c
@@ -1750,7 +1750,7 @@ typedef BOOL (WINAPI * __QueryInformationJobObject) (HANDLE, JOBOBJECTINFOCLASS,
* achieves the goal of postmaster running in a similar environment as pg_ctl.
*/
static void
-InheritStdHandles(STARTUPINFO* si)
+InheritStdHandles(STARTUPINFO *si)
{
si->dwFlags |= STARTF_USESTDHANDLES;
si->hStdInput = GetStdHandle(STD_INPUT_HANDLE);
@@ -1802,8 +1802,8 @@ CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION *processInfo, bool as_ser
si.cb = sizeof(si);
/*
- * Set stdin/stdout/stderr handles to be inherited in the child
- * process. That allows postmaster and the processes it starts to perform
+ * Set stdin/stdout/stderr handles to be inherited in the child process.
+ * That allows postmaster and the processes it starts to perform
* additional checks to see if running in a service (otherwise they get
* the default console handles - which point to "somewhere").
*/
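The pg_ctl hunk above concerns passing the console's standard handles down to the postmaster on Windows so that it and its children can tell whether they are running under a service. A minimal stand-alone sketch of that technique (illustrative only; error handling omitted):

#ifdef WIN32
#include <windows.h>

/* Illustrative only: start a child that inherits our std handles. */
static void
start_child_with_std_handles(char *cmdline)
{
    STARTUPINFO si;
    PROCESS_INFORMATION pi;

    ZeroMemory(&si, sizeof(si));
    si.cb = sizeof(si);

    /* Hand our console handles to the child instead of the defaults. */
    si.dwFlags |= STARTF_USESTDHANDLES;
    si.hStdInput = GetStdHandle(STD_INPUT_HANDLE);
    si.hStdOutput = GetStdHandle(STD_OUTPUT_HANDLE);
    si.hStdError = GetStdHandle(STD_ERROR_HANDLE);

    /* bInheritHandles must be TRUE for the handles above to be inherited. */
    CreateProcess(NULL, cmdline, NULL, NULL, TRUE, 0, NULL, NULL, &si, &pi);
}
#endif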
diff --git a/src/bin/pg_ctl/t/002_status.pl b/src/bin/pg_ctl/t/002_status.pl
index 2503d74a76d..ab26ee686ca 100644
--- a/src/bin/pg_ctl/t/002_status.pl
+++ b/src/bin/pg_ctl/t/002_status.pl
@@ -8,7 +8,7 @@ use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
-my $tempdir = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
command_exit_is([ 'pg_ctl', 'status', '-D', "$tempdir/nonexistent" ],
4, 'pg_ctl status with nonexistent directory');
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 24e42fa5d7d..77fe51a3a53 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -2580,12 +2580,12 @@ ReadToc(ArchiveHandle *AH)
is_supported = false;
else
{
- tmp = ReadStr(AH);
+ tmp = ReadStr(AH);
- if (strcmp(tmp, "true") == 0)
- is_supported = false;
+ if (strcmp(tmp, "true") == 0)
+ is_supported = false;
- free(tmp);
+ free(tmp);
}
if (!is_supported)
diff --git a/src/bin/pg_dump/pg_backup_custom.c b/src/bin/pg_dump/pg_backup_custom.c
index c3b9c365d5c..3443eef6b0e 100644
--- a/src/bin/pg_dump/pg_backup_custom.c
+++ b/src/bin/pg_dump/pg_backup_custom.c
@@ -956,11 +956,11 @@ _readBlockHeader(ArchiveHandle *AH, int *type, int *id)
int byt;
/*
- * Note: if we are at EOF with a pre-1.3 input file, we'll pg_fatal() inside
- * ReadInt rather than returning EOF. It doesn't seem worth jumping
- * through hoops to deal with that case better, because no such files are
- * likely to exist in the wild: only some 7.1 development versions of
- * pg_dump ever generated such files.
+ * Note: if we are at EOF with a pre-1.3 input file, we'll pg_fatal()
+ * inside ReadInt rather than returning EOF. It doesn't seem worth
+ * jumping through hoops to deal with that case better, because no such
+ * files are likely to exist in the wild: only some 7.1 development
+ * versions of pg_dump ever generated such files.
*/
if (AH->version < K_VERS_1_3)
*type = BLK_DATA;
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 786d592e2ba..7cc9c72e492 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -1318,8 +1318,8 @@ expand_schema_name_patterns(Archive *fout,
for (cell = patterns->head; cell; cell = cell->next)
{
- PQExpBufferData dbbuf;
- int dotcnt;
+ PQExpBufferData dbbuf;
+ int dotcnt;
appendPQExpBufferStr(query,
"SELECT oid FROM pg_catalog.pg_namespace n\n");
@@ -1376,7 +1376,7 @@ expand_extension_name_patterns(Archive *fout,
*/
for (cell = patterns->head; cell; cell = cell->next)
{
- int dotcnt;
+ int dotcnt;
appendPQExpBufferStr(query,
"SELECT oid FROM pg_catalog.pg_extension e\n");
@@ -1429,7 +1429,7 @@ expand_foreign_server_name_patterns(Archive *fout,
for (cell = patterns->head; cell; cell = cell->next)
{
- int dotcnt;
+ int dotcnt;
appendPQExpBufferStr(query,
"SELECT oid FROM pg_catalog.pg_foreign_server s\n");
@@ -1481,8 +1481,8 @@ expand_table_name_patterns(Archive *fout,
for (cell = patterns->head; cell; cell = cell->next)
{
- PQExpBufferData dbbuf;
- int dotcnt;
+ PQExpBufferData dbbuf;
+ int dotcnt;
/*
* Query must remain ABSOLUTELY devoid of unqualified names. This
@@ -4342,7 +4342,8 @@ dumpPublicationTable(Archive *fout, const PublicationRelInfo *pubrinfo)
{
/*
* It's necessary to add parentheses around the expression because
- * pg_get_expr won't supply the parentheses for things like WHERE TRUE.
+ * pg_get_expr won't supply the parentheses for things like WHERE
+ * TRUE.
*/
appendPQExpBuffer(query, " WHERE (%s)", pubrinfo->pubrelqual);
}
@@ -4858,8 +4859,8 @@ binary_upgrade_set_pg_class_oids(Archive *fout,
/*
* Not every relation has storage. Also, in a pre-v12 database,
- * partitioned tables have a relfilenode, which should not be preserved
- * when upgrading.
+ * partitioned tables have a relfilenode, which should not be
+ * preserved when upgrading.
*/
if (OidIsValid(relfilenode) && relkind != RELKIND_PARTITIONED_TABLE)
appendPQExpBuffer(upgrade_buffer,
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 52f9f7c4d66..ae41a652d79 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -1269,7 +1269,7 @@ expand_dbname_patterns(PGconn *conn,
for (SimpleStringListCell *cell = patterns->head; cell; cell = cell->next)
{
- int dotcnt;
+ int dotcnt;
appendPQExpBufferStr(query,
"SELECT datname FROM pg_catalog.pg_database n\n");
diff --git a/src/bin/pg_dump/t/001_basic.pl b/src/bin/pg_dump/t/001_basic.pl
index 65e6c01fed7..a583c8a6d24 100644
--- a/src/bin/pg_dump/t/001_basic.pl
+++ b/src/bin/pg_dump/t/001_basic.pl
@@ -8,7 +8,7 @@ use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
-my $tempdir = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
#########################################
# Basic checks
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index 3b31e13f62b..1f08716f690 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -8,7 +8,7 @@ use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
-my $tempdir = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
###############################################################
# Definition of the pg_dump runs to make.
@@ -2439,7 +2439,7 @@ my %tests = (
'CREATE PUBLICATION pub3' => {
create_order => 50,
create_sql => 'CREATE PUBLICATION pub3;',
- regexp => qr/^
+ regexp => qr/^
\QCREATE PUBLICATION pub3 WITH (publish = 'insert, update, delete, truncate');\E
/xm,
like => { %full_runs, section_post_data => 1, },
@@ -2448,7 +2448,7 @@ my %tests = (
'CREATE PUBLICATION pub4' => {
create_order => 50,
create_sql => 'CREATE PUBLICATION pub4;',
- regexp => qr/^
+ regexp => qr/^
\QCREATE PUBLICATION pub4 WITH (publish = 'insert, update, delete, truncate');\E
/xm,
like => { %full_runs, section_post_data => 1, },
@@ -2501,7 +2501,8 @@ my %tests = (
unlike => { exclude_dump_test_schema => 1, },
},
- 'ALTER PUBLICATION pub1 ADD TABLE test_seventh_table (col3, col2) WHERE (col1 = 1)' => {
+ 'ALTER PUBLICATION pub1 ADD TABLE test_seventh_table (col3, col2) WHERE (col1 = 1)'
+ => {
create_order => 52,
create_sql =>
'ALTER PUBLICATION pub1 ADD TABLE dump_test.test_seventh_table (col3, col2) WHERE (col1 = 1);',
@@ -2510,7 +2511,7 @@ my %tests = (
/xm,
like => { %full_runs, section_post_data => 1, },
unlike => { exclude_dump_test_schema => 1, },
- },
+ },
'ALTER PUBLICATION pub3 ADD ALL TABLES IN SCHEMA dump_test' => {
create_order => 51,
@@ -2519,7 +2520,7 @@ my %tests = (
regexp => qr/^
\QALTER PUBLICATION pub3 ADD ALL TABLES IN SCHEMA dump_test;\E
/xm,
- like => { %full_runs, section_post_data => 1, },
+ like => { %full_runs, section_post_data => 1, },
unlike => { exclude_dump_test_schema => 1, },
},
@@ -2540,14 +2541,15 @@ my %tests = (
regexp => qr/^
\QALTER PUBLICATION pub4 ADD TABLE ONLY dump_test.test_table WHERE ((col1 > 0));\E
/xm,
- like => { %full_runs, section_post_data => 1, },
+ like => { %full_runs, section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
exclude_test_table => 1,
},
},
- 'ALTER PUBLICATION pub4 ADD TABLE test_second_table WHERE (col2 = \'test\');' => {
+ 'ALTER PUBLICATION pub4 ADD TABLE test_second_table WHERE (col2 = \'test\');'
+ => {
create_order => 52,
create_sql =>
'ALTER PUBLICATION pub4 ADD TABLE dump_test.test_second_table WHERE (col2 = \'test\');',
@@ -2556,7 +2558,7 @@ my %tests = (
/xm,
like => { %full_runs, section_post_data => 1, },
unlike => { exclude_dump_test_schema => 1, },
- },
+ },
'CREATE SCHEMA public' => {
regexp => qr/^CREATE SCHEMA public;/m,
@@ -3979,14 +3981,12 @@ command_fails_like(
$node->command_fails_like(
[ 'pg_dumpall', '--exclude-database', '.' ],
qr/pg_dumpall: error: improper qualified name \(too many dotted names\): \./,
- 'pg_dumpall: option --exclude-database rejects multipart pattern "."'
-);
+ 'pg_dumpall: option --exclude-database rejects multipart pattern "."');
$node->command_fails_like(
[ 'pg_dumpall', '--exclude-database', 'myhost.mydb' ],
qr/pg_dumpall: error: improper qualified name \(too many dotted names\): myhost\.mydb/,
- 'pg_dumpall: option --exclude-database rejects multipart database names'
-);
+ 'pg_dumpall: option --exclude-database rejects multipart database names');
#########################################
# Test valid database exclusion patterns
@@ -4002,20 +4002,17 @@ $node->command_ok(
$node->command_fails_like(
[ 'pg_dump', '--schema', 'myhost.mydb.myschema' ],
qr/pg_dump: error: improper qualified name \(too many dotted names\): myhost\.mydb\.myschema/,
- 'pg_dump: option --schema rejects three-part schema names'
-);
+ 'pg_dump: option --schema rejects three-part schema names');
$node->command_fails_like(
[ 'pg_dump', '--schema', 'otherdb.myschema' ],
qr/pg_dump: error: cross-database references are not implemented: otherdb\.myschema/,
- 'pg_dump: option --schema rejects cross-database multipart schema names'
-);
+ 'pg_dump: option --schema rejects cross-database multipart schema names');
$node->command_fails_like(
[ 'pg_dump', '--schema', '.' ],
qr/pg_dump: error: cross-database references are not implemented: \./,
- 'pg_dump: option --schema rejects degenerate two-part schema name: "."'
-);
+ 'pg_dump: option --schema rejects degenerate two-part schema name: "."');
$node->command_fails_like(
[ 'pg_dump', '--schema', '"some.other.db".myschema' ],
@@ -4035,17 +4032,18 @@ $node->command_fails_like(
$node->command_fails_like(
[ 'pg_dump', '--table', 'myhost.mydb.myschema.mytable' ],
qr/pg_dump: error: improper relation name \(too many dotted names\): myhost\.mydb\.myschema\.mytable/,
- 'pg_dump: option --table rejects four-part table names'
-);
+ 'pg_dump: option --table rejects four-part table names');
$node->command_fails_like(
[ 'pg_dump', '--table', 'otherdb.pg_catalog.pg_class' ],
qr/pg_dump: error: cross-database references are not implemented: otherdb\.pg_catalog\.pg_class/,
- 'pg_dump: option --table rejects cross-database three part table names'
-);
+ 'pg_dump: option --table rejects cross-database three part table names');
command_fails_like(
- [ 'pg_dump', '-p', "$port", '--table', '"some.other.db".pg_catalog.pg_class' ],
+ [
+ 'pg_dump', '-p', "$port", '--table',
+ '"some.other.db".pg_catalog.pg_class'
+ ],
qr/pg_dump: error: cross-database references are not implemented: "some\.other\.db"\.pg_catalog\.pg_class/,
'pg_dump: option --table rejects cross-database three part table names with embedded dots'
);
diff --git a/src/bin/pg_dump/t/003_pg_dump_with_server.pl b/src/bin/pg_dump/t/003_pg_dump_with_server.pl
index c2848663264..a0b23aae0fb 100644
--- a/src/bin/pg_dump/t/003_pg_dump_with_server.pl
+++ b/src/bin/pg_dump/t/003_pg_dump_with_server.pl
@@ -8,7 +8,7 @@ use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
-my $tempdir = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
my $node = PostgreSQL::Test::Cluster->new('main');
my $port = $node->port;
diff --git a/src/bin/pg_dump/t/010_dump_connstr.pl b/src/bin/pg_dump/t/010_dump_connstr.pl
index 7a745ade0fb..6e497447c35 100644
--- a/src/bin/pg_dump/t/010_dump_connstr.pl
+++ b/src/bin/pg_dump/t/010_dump_connstr.pl
@@ -30,8 +30,10 @@ my $dbname1 =
. generate_ascii_string(1, 9)
. generate_ascii_string(11, 12)
. generate_ascii_string(14, 33)
- . ($PostgreSQL::Test::Utils::windows_os ? '' : '"x"') # IPC::Run mishandles '"' on Windows
- . generate_ascii_string(35, 43) # skip ','
+ . ($PostgreSQL::Test::Utils::windows_os
+ ? ''
+ : '"x"') # IPC::Run mishandles '"' on Windows
+ . generate_ascii_string(35, 43) # skip ','
. generate_ascii_string(45, 54);
my $dbname2 = 'regression' . generate_ascii_string(55, 65) # skip 'B'-'W'
. generate_ascii_string(88, 99) # skip 'd'-'w'
@@ -171,7 +173,8 @@ system_log('cat', $plain);
my ($stderr, $result);
my $restore_super = qq{regress_a'b\\c=d\\ne"f};
$restore_super =~ s/"//g
- if $PostgreSQL::Test::Utils::windows_os; # IPC::Run mishandles '"' on Windows
+ if
+ $PostgreSQL::Test::Utils::windows_os; # IPC::Run mishandles '"' on Windows
# Restore full dump through psql using environment variables for
diff --git a/src/bin/pg_rewind/filemap.c b/src/bin/pg_rewind/filemap.c
index d61067f6b2e..62529310415 100644
--- a/src/bin/pg_rewind/filemap.c
+++ b/src/bin/pg_rewind/filemap.c
@@ -139,9 +139,9 @@ static const struct exclude_list_item excludeFiles[] =
{"pg_internal.init", true}, /* defined as RELCACHE_INIT_FILENAME */
/*
- * If there is a backup_label or tablespace_map file, it indicates that
- * a recovery failed and this cluster probably can't be rewound, but
- * exclude them anyway if they are found.
+ * If there is a backup_label or tablespace_map file, it indicates that a
+ * recovery failed and this cluster probably can't be rewound, but exclude
+ * them anyway if they are found.
*/
{"backup_label", false}, /* defined as BACKUP_LABEL_FILE */
{"tablespace_map", false}, /* defined as TABLESPACE_MAP */
diff --git a/src/bin/pg_rewind/t/004_pg_xlog_symlink.pl b/src/bin/pg_rewind/t/004_pg_xlog_symlink.pl
index 805935c6fd5..5aafe586e14 100644
--- a/src/bin/pg_rewind/t/004_pg_xlog_symlink.pl
+++ b/src/bin/pg_rewind/t/004_pg_xlog_symlink.pl
@@ -20,7 +20,8 @@ sub run_test
{
my $test_mode = shift;
- my $primary_xlogdir = "${PostgreSQL::Test::Utils::tmp_check}/xlog_primary";
+ my $primary_xlogdir =
+ "${PostgreSQL::Test::Utils::tmp_check}/xlog_primary";
rmtree($primary_xlogdir);
RewindTest::setup_cluster($test_mode);
diff --git a/src/bin/pg_rewind/t/009_growing_files.pl b/src/bin/pg_rewind/t/009_growing_files.pl
index a5a58dbe060..9422828712a 100644
--- a/src/bin/pg_rewind/t/009_growing_files.pl
+++ b/src/bin/pg_rewind/t/009_growing_files.pl
@@ -51,12 +51,13 @@ append_to_file "$standby_pgdata/tst_both_dir/file1", 'a';
# copy operation and the result will be an error.
my $ret = run_log(
[
- 'pg_rewind', '--debug',
+ 'pg_rewind', '--debug',
'--source-pgdata', $standby_pgdata,
'--target-pgdata', $primary_pgdata,
'--no-sync',
],
- '2>>', "$standby_pgdata/tst_both_dir/file1");
+ '2>>',
+ "$standby_pgdata/tst_both_dir/file1");
ok(!$ret, 'Error out on copying growing file');
# Ensure that the files are of different size, the final error message should
diff --git a/src/bin/pg_rewind/t/RewindTest.pm b/src/bin/pg_rewind/t/RewindTest.pm
index 8fd1f4b9de4..98b66b01f82 100644
--- a/src/bin/pg_rewind/t/RewindTest.pm
+++ b/src/bin/pg_rewind/t/RewindTest.pm
@@ -101,8 +101,8 @@ sub check_query
],
'>', \$stdout, '2>', \$stderr;
- is($result, 1, "$test_name: psql exit code");
- is($stderr, '', "$test_name: psql no stderr");
+ is($result, 1, "$test_name: psql exit code");
+ is($stderr, '', "$test_name: psql no stderr");
is($stdout, $expected_stdout, "$test_name: query result matches");
return;
@@ -115,7 +115,8 @@ sub setup_cluster
# Initialize primary, data checksums are mandatory
$node_primary =
- PostgreSQL::Test::Cluster->new('primary' . ($extra_name ? "_${extra_name}" : ''));
+ PostgreSQL::Test::Cluster->new(
+ 'primary' . ($extra_name ? "_${extra_name}" : ''));
# Set up pg_hba.conf and pg_ident.conf for the role running
# pg_rewind. This role is used for all the tests, and has
@@ -163,7 +164,8 @@ sub create_standby
my $extra_name = shift;
$node_standby =
- PostgreSQL::Test::Cluster->new('standby' . ($extra_name ? "_${extra_name}" : ''));
+ PostgreSQL::Test::Cluster->new(
+ 'standby' . ($extra_name ? "_${extra_name}" : ''));
$node_primary->backup('my_backup');
$node_standby->init_from_backup($node_primary, 'my_backup');
my $connstr_primary = $node_primary->connstr();
@@ -305,7 +307,8 @@ sub run_pg_rewind
# segments from the old primary to the archives. These
# will be used by pg_rewind.
rmtree($node_primary->archive_dir);
- PostgreSQL::Test::RecursiveCopy::copypath($node_primary->data_dir . "/pg_wal",
+ PostgreSQL::Test::RecursiveCopy::copypath(
+ $node_primary->data_dir . "/pg_wal",
$node_primary->archive_dir);
# Fast way to remove entire directory content
diff --git a/src/bin/pg_upgrade/t/002_pg_upgrade.pl b/src/bin/pg_upgrade/t/002_pg_upgrade.pl
index 76b8dab4b73..8372a85e6ef 100644
--- a/src/bin/pg_upgrade/t/002_pg_upgrade.pl
+++ b/src/bin/pg_upgrade/t/002_pg_upgrade.pl
@@ -51,7 +51,8 @@ if ( (defined($ENV{olddump}) && !defined($ENV{oldinstall}))
my $tempdir = PostgreSQL::Test::Utils::tempdir;
# Initialize node to upgrade
-my $oldnode = PostgreSQL::Test::Cluster->new('old_node',
+my $oldnode =
+ PostgreSQL::Test::Cluster->new('old_node',
install_path => $ENV{oldinstall});
# To increase coverage of non-standard segment size and group access without
@@ -132,7 +133,7 @@ if (defined($ENV{oldinstall}))
$oldnode->command_ok(
[
'psql', '-X',
- '-f', "$srcdir/src/bin/pg_upgrade/upgrade_adapt.sql",
+ '-f', "$srcdir/src/bin/pg_upgrade/upgrade_adapt.sql",
'regression'
]);
}
diff --git a/src/bin/pg_upgrade/util.c b/src/bin/pg_upgrade/util.c
index 414de063496..9edfe7c3605 100644
--- a/src/bin/pg_upgrade/util.c
+++ b/src/bin/pg_upgrade/util.c
@@ -143,6 +143,7 @@ pg_log_v(eLogType type, const char *fmt, va_list ap)
break;
case PG_STATUS:
+
/*
* For output to a display, do leading truncation. Append \r so
* that the next message is output at the start of the line.
diff --git a/src/bin/pg_verifybackup/t/003_corruption.pl b/src/bin/pg_verifybackup/t/003_corruption.pl
index 843016ad80c..3dba7d8a698 100644
--- a/src/bin/pg_verifybackup/t/003_corruption.pl
+++ b/src/bin/pg_verifybackup/t/003_corruption.pl
@@ -16,7 +16,7 @@ $primary->start;
# Include a user-defined tablespace in the hopes of detecting problems in that
# area.
-my $source_ts_path =PostgreSQL::Test::Utils::tempdir_short();
+my $source_ts_path = PostgreSQL::Test::Utils::tempdir_short();
my $source_ts_prefix = $source_ts_path;
$source_ts_prefix =~ s!(^[A-Z]:/[^/]*)/.*!$1!;
diff --git a/src/bin/pg_verifybackup/t/004_options.pl b/src/bin/pg_verifybackup/t/004_options.pl
index 6fdd74e5eea..8cda66ca001 100644
--- a/src/bin/pg_verifybackup/t/004_options.pl
+++ b/src/bin/pg_verifybackup/t/004_options.pl
@@ -15,7 +15,8 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
$primary->init(allows_streaming => 1);
$primary->start;
my $backup_path = $primary->backup_dir . '/test_options';
-$primary->command_ok([ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ],
+$primary->command_ok(
+ [ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ],
"base backup ok");
# Verify that pg_verifybackup -q succeeds and produces no output.
diff --git a/src/bin/pg_verifybackup/t/005_bad_manifest.pl b/src/bin/pg_verifybackup/t/005_bad_manifest.pl
index 48fecfa3152..b9573c57426 100644
--- a/src/bin/pg_verifybackup/t/005_bad_manifest.pl
+++ b/src/bin/pg_verifybackup/t/005_bad_manifest.pl
@@ -12,10 +12,8 @@ use Test::More;
my $tempdir = PostgreSQL::Test::Utils::tempdir;
-test_bad_manifest(
- 'input string ended unexpectedly',
- qr/could not parse backup manifest: parsing failed/,
- <<EOM);
+test_bad_manifest('input string ended unexpectedly',
+ qr/could not parse backup manifest: parsing failed/, <<EOM);
{
EOM
diff --git a/src/bin/pg_verifybackup/t/007_wal.pl b/src/bin/pg_verifybackup/t/007_wal.pl
index bef2701ef75..6e9fafcd55a 100644
--- a/src/bin/pg_verifybackup/t/007_wal.pl
+++ b/src/bin/pg_verifybackup/t/007_wal.pl
@@ -15,7 +15,8 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
$primary->init(allows_streaming => 1);
$primary->start;
my $backup_path = $primary->backup_dir . '/test_wal';
-$primary->command_ok([ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ],
+$primary->command_ok(
+ [ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ],
"base backup ok");
# Rename pg_wal.
@@ -69,7 +70,8 @@ $primary->safe_psql('postgres', 'SELECT pg_switch_wal()');
my $backup_path2 = $primary->backup_dir . '/test_tli';
# The base backup run below does a checkpoint, that removes the first segment
# of the current timeline.
-$primary->command_ok([ 'pg_basebackup', '-D', $backup_path2, '--no-sync', '-cfast' ],
+$primary->command_ok(
+ [ 'pg_basebackup', '-D', $backup_path2, '--no-sync', '-cfast' ],
"base backup 2 ok");
command_ok(
[ 'pg_verifybackup', $backup_path2 ],
diff --git a/src/bin/pg_verifybackup/t/008_untar.pl b/src/bin/pg_verifybackup/t/008_untar.pl
index 915249a19de..4c4959516dd 100644
--- a/src/bin/pg_verifybackup/t/008_untar.pl
+++ b/src/bin/pg_verifybackup/t/008_untar.pl
@@ -16,89 +16,90 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
$primary->init(allows_streaming => 1);
$primary->start;
-my $backup_path = $primary->backup_dir . '/server-backup';
+my $backup_path = $primary->backup_dir . '/server-backup';
my $extract_path = $primary->backup_dir . '/extracted-backup';
my @test_configuration = (
{
'compression_method' => 'none',
- 'backup_flags' => [],
- 'backup_archive' => 'base.tar',
- 'enabled' => 1
+ 'backup_flags' => [],
+ 'backup_archive' => 'base.tar',
+ 'enabled' => 1
},
{
'compression_method' => 'gzip',
- 'backup_flags' => ['--compress', 'server-gzip'],
- 'backup_archive' => 'base.tar.gz',
+ 'backup_flags' => [ '--compress', 'server-gzip' ],
+ 'backup_archive' => 'base.tar.gz',
'decompress_program' => $ENV{'GZIP_PROGRAM'},
- 'decompress_flags' => [ '-d' ],
- 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
+ 'decompress_flags' => ['-d'],
+ 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
},
{
'compression_method' => 'lz4',
- 'backup_flags' => ['--compress', 'server-lz4'],
- 'backup_archive' => 'base.tar.lz4',
+ 'backup_flags' => [ '--compress', 'server-lz4' ],
+ 'backup_archive' => 'base.tar.lz4',
'decompress_program' => $ENV{'LZ4'},
- 'decompress_flags' => [ '-d', '-m'],
- 'enabled' => check_pg_config("#define USE_LZ4 1")
+ 'decompress_flags' => [ '-d', '-m' ],
+ 'enabled' => check_pg_config("#define USE_LZ4 1")
},
{
'compression_method' => 'zstd',
- 'backup_flags' => ['--compress', 'server-zstd'],
- 'backup_archive' => 'base.tar.zst',
+ 'backup_flags' => [ '--compress', 'server-zstd' ],
+ 'backup_archive' => 'base.tar.zst',
'decompress_program' => $ENV{'ZSTD'},
- 'decompress_flags' => [ '-d' ],
- 'enabled' => check_pg_config("#define USE_ZSTD 1")
- }
-);
+ 'decompress_flags' => ['-d'],
+ 'enabled' => check_pg_config("#define USE_ZSTD 1")
+ });
for my $tc (@test_configuration)
{
my $method = $tc->{'compression_method'};
- SKIP: {
+ SKIP:
+ {
skip "$method compression not supported by this build", 3
- if ! $tc->{'enabled'};
+ if !$tc->{'enabled'};
skip "no decompressor available for $method", 3
if exists $tc->{'decompress_program'}
&& (!defined $tc->{'decompress_program'}
- || $tc->{'decompress_program'} eq '');
+ || $tc->{'decompress_program'} eq '');
# Take a server-side backup.
my @backup = (
- 'pg_basebackup', '--no-sync', '-cfast', '--target',
- "server:$backup_path", '-Xfetch'
- );
- push @backup, @{$tc->{'backup_flags'}};
+ 'pg_basebackup', '--no-sync',
+ '-cfast', '--target',
+ "server:$backup_path", '-Xfetch');
+ push @backup, @{ $tc->{'backup_flags'} };
$primary->command_ok(\@backup,
- "server side backup, compression $method");
+ "server side backup, compression $method");
# Verify that the we got the files we expected.
my $backup_files = join(',',
sort grep { $_ ne '.' && $_ ne '..' } slurp_dir($backup_path));
- my $expected_backup_files = join(',',
- sort ('backup_manifest', $tc->{'backup_archive'}));
- is($backup_files,$expected_backup_files,
+ my $expected_backup_files =
+ join(',', sort ('backup_manifest', $tc->{'backup_archive'}));
+ is($backup_files, $expected_backup_files,
"found expected backup files, compression $method");
# Decompress.
if (exists $tc->{'decompress_program'})
{
my @decompress = ($tc->{'decompress_program'});
- push @decompress, @{$tc->{'decompress_flags'}}
- if $tc->{'decompress_flags'};
+ push @decompress, @{ $tc->{'decompress_flags'} }
+ if $tc->{'decompress_flags'};
push @decompress, $backup_path . '/' . $tc->{'backup_archive'};
system_or_bail(@decompress);
}
- SKIP: {
+ SKIP:
+ {
my $tar = $ENV{TAR};
# don't check for a working tar here, to accommodate various odd
# cases such as AIX. If tar doesn't work the init_from_backup below
# will fail.
skip "no tar program available", 1
- if (!defined $tar || $tar eq '');
+ if (!defined $tar || $tar eq '');
# Untar.
mkdir($extract_path);
@@ -106,8 +107,12 @@ for my $tc (@test_configuration)
'-C', $extract_path);
# Verify.
- $primary->command_ok([ 'pg_verifybackup', '-n',
- '-m', "$backup_path/backup_manifest", '-e', $extract_path ],
+ $primary->command_ok(
+ [
+ 'pg_verifybackup', '-n',
+ '-m', "$backup_path/backup_manifest",
+ '-e', $extract_path
+ ],
"verify backup, compression $method");
}
diff --git a/src/bin/pg_verifybackup/t/009_extract.pl b/src/bin/pg_verifybackup/t/009_extract.pl
index d6f11b95535..56889e1ece9 100644
--- a/src/bin/pg_verifybackup/t/009_extract.pl
+++ b/src/bin/pg_verifybackup/t/009_extract.pl
@@ -17,46 +17,47 @@ $primary->start;
my @test_configuration = (
{
'compression_method' => 'none',
- 'backup_flags' => [],
- 'enabled' => 1
+ 'backup_flags' => [],
+ 'enabled' => 1
},
{
'compression_method' => 'gzip',
- 'backup_flags' => ['--compress', 'server-gzip:5'],
- 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
+ 'backup_flags' => [ '--compress', 'server-gzip:5' ],
+ 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
},
{
'compression_method' => 'lz4',
- 'backup_flags' => ['--compress', 'server-lz4:5'],
- 'enabled' => check_pg_config("#define USE_LZ4 1")
+ 'backup_flags' => [ '--compress', 'server-lz4:5' ],
+ 'enabled' => check_pg_config("#define USE_LZ4 1")
},
{
'compression_method' => 'zstd',
- 'backup_flags' => ['--compress', 'server-zstd:5'],
- 'enabled' => check_pg_config("#define USE_ZSTD 1")
+ 'backup_flags' => [ '--compress', 'server-zstd:5' ],
+ 'enabled' => check_pg_config("#define USE_ZSTD 1")
},
{
'compression_method' => 'parallel zstd',
- 'backup_flags' => ['--compress', 'server-zstd:workers=3'],
- 'enabled' => check_pg_config("#define USE_ZSTD 1"),
- 'possibly_unsupported' => qr/could not set compression worker count to 3: Unsupported parameter/
- }
-);
+ 'backup_flags' => [ '--compress', 'server-zstd:workers=3' ],
+ 'enabled' => check_pg_config("#define USE_ZSTD 1"),
+ 'possibly_unsupported' =>
+ qr/could not set compression worker count to 3: Unsupported parameter/
+ });
for my $tc (@test_configuration)
{
my $backup_path = $primary->backup_dir . '/' . 'extract_backup';
- my $method = $tc->{'compression_method'};
+ my $method = $tc->{'compression_method'};
- SKIP: {
+ SKIP:
+ {
skip "$method compression not supported by this build", 2
- if ! $tc->{'enabled'};
+ if !$tc->{'enabled'};
# Take backup with server compression enabled.
- my @backup = (
+ my @backup = (
'pg_basebackup', '-D', $backup_path,
'-Xfetch', '--no-sync', '-cfast', '-Fp');
- push @backup, @{$tc->{'backup_flags'}};
+ push @backup, @{ $tc->{'backup_flags'} };
my @verify = ('pg_verifybackup', '-e', $backup_path);
@@ -64,7 +65,7 @@ for my $tc (@test_configuration)
my $backup_stdout = '';
my $backup_stderr = '';
my $backup_result = $primary->run_log(\@backup, '>', \$backup_stdout,
- '2>', \$backup_stderr);
+ '2>', \$backup_stderr);
if ($backup_stdout ne '')
{
print "# standard output was:\n$backup_stdout";
@@ -73,8 +74,9 @@ for my $tc (@test_configuration)
{
print "# standard error was:\n$backup_stderr";
}
- if (! $backup_result && $tc->{'possibly_unsupported'} &&
- $backup_stderr =~ /$tc->{'possibly_unsupported'}/)
+ if ( !$backup_result
+ && $tc->{'possibly_unsupported'}
+ && $backup_stderr =~ /$tc->{'possibly_unsupported'}/)
{
skip "compression with $method not supported by this build", 2;
}
@@ -85,7 +87,7 @@ for my $tc (@test_configuration)
# Make sure that it verifies OK.
$primary->command_ok(\@verify,
- "backup verified, compression method \"$method\"");
+ "backup verified, compression method \"$method\"");
}
# Remove backup immediately to save disk space.
diff --git a/src/bin/pg_verifybackup/t/010_client_untar.pl b/src/bin/pg_verifybackup/t/010_client_untar.pl
index c1cd12cb065..77cb503784c 100644
--- a/src/bin/pg_verifybackup/t/010_client_untar.pl
+++ b/src/bin/pg_verifybackup/t/010_client_untar.pl
@@ -15,73 +15,74 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
$primary->init(allows_streaming => 1);
$primary->start;
-my $backup_path = $primary->backup_dir . '/client-backup';
+my $backup_path = $primary->backup_dir . '/client-backup';
my $extract_path = $primary->backup_dir . '/extracted-backup';
my @test_configuration = (
{
'compression_method' => 'none',
- 'backup_flags' => [],
- 'backup_archive' => 'base.tar',
- 'enabled' => 1
+ 'backup_flags' => [],
+ 'backup_archive' => 'base.tar',
+ 'enabled' => 1
},
{
'compression_method' => 'gzip',
- 'backup_flags' => ['--compress', 'client-gzip:5'],
- 'backup_archive' => 'base.tar.gz',
+ 'backup_flags' => [ '--compress', 'client-gzip:5' ],
+ 'backup_archive' => 'base.tar.gz',
'decompress_program' => $ENV{'GZIP_PROGRAM'},
- 'decompress_flags' => [ '-d' ],
- 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
+ 'decompress_flags' => ['-d'],
+ 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
},
{
'compression_method' => 'lz4',
- 'backup_flags' => ['--compress', 'client-lz4:5'],
- 'backup_archive' => 'base.tar.lz4',
+ 'backup_flags' => [ '--compress', 'client-lz4:5' ],
+ 'backup_archive' => 'base.tar.lz4',
'decompress_program' => $ENV{'LZ4'},
- 'decompress_flags' => [ '-d' ],
- 'output_file' => 'base.tar',
- 'enabled' => check_pg_config("#define USE_LZ4 1")
+ 'decompress_flags' => ['-d'],
+ 'output_file' => 'base.tar',
+ 'enabled' => check_pg_config("#define USE_LZ4 1")
},
{
'compression_method' => 'zstd',
- 'backup_flags' => ['--compress', 'client-zstd:5'],
- 'backup_archive' => 'base.tar.zst',
+ 'backup_flags' => [ '--compress', 'client-zstd:5' ],
+ 'backup_archive' => 'base.tar.zst',
'decompress_program' => $ENV{'ZSTD'},
- 'decompress_flags' => [ '-d' ],
- 'enabled' => check_pg_config("#define USE_ZSTD 1")
+ 'decompress_flags' => ['-d'],
+ 'enabled' => check_pg_config("#define USE_ZSTD 1")
},
{
'compression_method' => 'parallel zstd',
- 'backup_flags' => ['--compress', 'client-zstd:workers=3'],
- 'backup_archive' => 'base.tar.zst',
+ 'backup_flags' => [ '--compress', 'client-zstd:workers=3' ],
+ 'backup_archive' => 'base.tar.zst',
'decompress_program' => $ENV{'ZSTD'},
- 'decompress_flags' => [ '-d' ],
- 'enabled' => check_pg_config("#define USE_ZSTD 1"),
- 'possibly_unsupported' => qr/could not set compression worker count to 3: Unsupported parameter/
- }
-);
+ 'decompress_flags' => ['-d'],
+ 'enabled' => check_pg_config("#define USE_ZSTD 1"),
+ 'possibly_unsupported' =>
+ qr/could not set compression worker count to 3: Unsupported parameter/
+ });
for my $tc (@test_configuration)
{
my $method = $tc->{'compression_method'};
- SKIP: {
+ SKIP:
+ {
skip "$method compression not supported by this build", 3
- if ! $tc->{'enabled'};
+ if !$tc->{'enabled'};
skip "no decompressor available for $method", 3
if exists $tc->{'decompress_program'}
&& (!defined $tc->{'decompress_program'}
- || $tc->{'decompress_program'} eq '');
+ || $tc->{'decompress_program'} eq '');
# Take a client-side backup.
- my @backup = (
+ my @backup = (
'pg_basebackup', '-D', $backup_path,
'-Xfetch', '--no-sync', '-cfast', '-Ft');
- push @backup, @{$tc->{'backup_flags'}};
+ push @backup, @{ $tc->{'backup_flags'} };
my $backup_stdout = '';
my $backup_stderr = '';
my $backup_result = $primary->run_log(\@backup, '>', \$backup_stdout,
- '2>', \$backup_stderr);
+ '2>', \$backup_stderr);
if ($backup_stdout ne '')
{
print "# standard output was:\n$backup_stdout";
@@ -90,8 +91,9 @@ for my $tc (@test_configuration)
{
print "# standard error was:\n$backup_stderr";
}
- if (! $backup_result && $tc->{'possibly_unsupported'} &&
- $backup_stderr =~ /$tc->{'possibly_unsupported'}/)
+ if ( !$backup_result
+ && $tc->{'possibly_unsupported'}
+ && $backup_stderr =~ /$tc->{'possibly_unsupported'}/)
{
skip "compression with $method not supported by this build", 3;
}
@@ -103,30 +105,31 @@ for my $tc (@test_configuration)
# Verify that the we got the files we expected.
my $backup_files = join(',',
sort grep { $_ ne '.' && $_ ne '..' } slurp_dir($backup_path));
- my $expected_backup_files = join(',',
- sort ('backup_manifest', $tc->{'backup_archive'}));
- is($backup_files,$expected_backup_files,
+ my $expected_backup_files =
+ join(',', sort ('backup_manifest', $tc->{'backup_archive'}));
+ is($backup_files, $expected_backup_files,
"found expected backup files, compression $method");
# Decompress.
if (exists $tc->{'decompress_program'})
{
my @decompress = ($tc->{'decompress_program'});
- push @decompress, @{$tc->{'decompress_flags'}}
- if $tc->{'decompress_flags'};
+ push @decompress, @{ $tc->{'decompress_flags'} }
+ if $tc->{'decompress_flags'};
push @decompress, $backup_path . '/' . $tc->{'backup_archive'};
push @decompress, $backup_path . '/' . $tc->{'output_file'}
- if $tc->{'output_file'};
+ if $tc->{'output_file'};
system_or_bail(@decompress);
}
- SKIP: {
+ SKIP:
+ {
my $tar = $ENV{TAR};
# don't check for a working tar here, to accommodate various odd
# cases such as AIX. If tar doesn't work the init_from_backup below
# will fail.
skip "no tar program available", 1
- if (!defined $tar || $tar eq '');
+ if (!defined $tar || $tar eq '');
# Untar.
mkdir($extract_path);
@@ -134,8 +137,12 @@ for my $tc (@test_configuration)
'-C', $extract_path);
# Verify.
- $primary->command_ok([ 'pg_verifybackup', '-n',
- '-m', "$backup_path/backup_manifest", '-e', $extract_path ],
+ $primary->command_ok(
+ [
+ 'pg_verifybackup', '-n',
+ '-m', "$backup_path/backup_manifest",
+ '-e', $extract_path
+ ],
"verify backup, compression $method");
}
diff --git a/src/bin/pg_waldump/pg_waldump.c b/src/bin/pg_waldump/pg_waldump.c
index 4f265ef5460..3151cb5562b 100644
--- a/src/bin/pg_waldump/pg_waldump.c
+++ b/src/bin/pg_waldump/pg_waldump.c
@@ -695,7 +695,7 @@ main(int argc, char **argv)
XLogReaderState *xlogreader_state;
XLogDumpPrivate private;
XLogDumpConfig config;
- XLogStats stats;
+ XLogStats stats;
XLogRecord *record;
XLogRecPtr first_record;
char *waldir = NULL;
diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index 02f250f5119..79c0cd374d3 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -277,9 +277,9 @@ bool progress_timestamp = false; /* progress report with Unix time */
int nclients = 1; /* number of clients */
int nthreads = 1; /* number of threads */
bool is_connect; /* establish connection for each transaction */
-bool report_per_command = false; /* report per-command latencies, retries
- * after errors and failures (errors
- * without retrying) */
+bool report_per_command = false; /* report per-command latencies,
+ * retries after errors and failures
+ * (errors without retrying) */
int main_pid; /* main process id used in log filename */
/*
@@ -302,8 +302,8 @@ int main_pid; /* main process id used in log filename */
*/
uint32 max_tries = 1;
-bool failures_detailed = false; /* whether to group failures in reports
- * or logs by basic types */
+bool failures_detailed = false; /* whether to group failures in
+ * reports or logs by basic types */
const char *pghost = NULL;
const char *pgport = NULL;
@@ -349,8 +349,8 @@ typedef struct
/*
* The maximum number of variables that we can currently store in 'vars'
- * without having to reallocate more space. We must always have max_vars >=
- * nvars.
+ * without having to reallocate more space. We must always have max_vars
+ * >= nvars.
*/
int max_vars;
@@ -390,17 +390,17 @@ typedef struct StatsData
{
pg_time_usec_t start_time; /* interval start time, for aggregates */
- /*
- * Transactions are counted depending on their execution and outcome. First
- * a transaction may have started or not: skipped transactions occur under
- * --rate and --latency-limit when the client is too late to execute them.
- * Secondly, a started transaction may ultimately succeed or fail, possibly
- * after some retries when --max-tries is not one. Thus
+ /*----------
+ * Transactions are counted depending on their execution and outcome.
+ * First a transaction may have started or not: skipped transactions occur
+ * under --rate and --latency-limit when the client is too late to execute
+ * them. Secondly, a started transaction may ultimately succeed or fail,
+ * possibly after some retries when --max-tries is not one. Thus
*
* the number of all transactions =
* 'skipped' (it was too late to execute them) +
* 'cnt' (the number of successful transactions) +
- * failed (the number of failed transactions).
+ * 'failed' (the number of failed transactions).
*
* A successful transaction can have several unsuccessful tries before a
* successful run. Thus
@@ -419,11 +419,11 @@ typedef struct StatsData
* failed (the number of failed transactions) =
* 'serialization_failures' (they got a serialization error and were not
* successfully retried) +
- * 'deadlock_failures' (they got a deadlock error and were not successfully
- * retried).
+ * 'deadlock_failures' (they got a deadlock error and were not
+ * successfully retried).
*
- * If the transaction was retried after a serialization or a deadlock error
- * this does not guarantee that this retry was successful. Thus
+ * If the transaction was retried after a serialization or a deadlock
+ * error this does not guarantee that this retry was successful. Thus
*
* 'retries' (number of retries) =
* number of retries in all retried transactions =
@@ -433,18 +433,20 @@ typedef struct StatsData
* 'retried' (number of all retried transactions) =
* successfully retried transactions +
* failed transactions.
+ *----------
*/
int64 cnt; /* number of successful transactions, not
* including 'skipped' */
int64 skipped; /* number of transactions skipped under --rate
* and --latency-limit */
- int64 retries; /* number of retries after a serialization or a
- * deadlock error in all the transactions */
- int64 retried; /* number of all transactions that were retried
- * after a serialization or a deadlock error
- * (perhaps the last try was unsuccessful) */
- int64 serialization_failures; /* number of transactions that were not
- * successfully retried after a
+ int64 retries; /* number of retries after a serialization or
+ * a deadlock error in all the transactions */
+ int64 retried; /* number of all transactions that were
+ * retried after a serialization or a deadlock
+ * error (perhaps the last try was
+ * unsuccessful) */
+ int64 serialization_failures; /* number of transactions that were
+ * not successfully retried after a
* serialization error */
int64 deadlock_failures; /* number of transactions that were not
* successfully retried after a deadlock
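
The accounting identities spelled out in the StatsData comment above can be sanity-checked with a small standalone program. The counter values below are made up for illustration (assuming retries are enabled), and the code is not part of pgbench; only the relationships between the fields come from the comment.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* hypothetical counters for one reporting interval (not a real run) */
        int64_t cnt = 90;                      /* successful transactions */
        int64_t skipped = 5;                   /* skipped under --rate/--latency-limit */
        int64_t serialization_failures = 3;
        int64_t deadlock_failures = 2;
        int64_t successfully_retried = 4;      /* succeeded after at least one retry */
        int64_t retries = 12;                  /* individual retry attempts */

        /* failed = serialization_failures + deadlock_failures */
        int64_t failed = serialization_failures + deadlock_failures;

        /* the number of all transactions = skipped + cnt + failed */
        int64_t total = skipped + cnt + failed;

        /* retried = successfully retried transactions + failed transactions */
        int64_t retried = successfully_retried + failed;

        assert(failed == 5);
        assert(total == 100);
        assert(retried == 9);
        assert(retries >= retried);            /* each retried transaction adds >= 1 retry */

        printf("total=%lld retried=%lld retries=%lld\n",
               (long long) total, (long long) retried, (long long) retries);
        return 0;
    }

The last assertion holds because every retried transaction contributes at least one entry to 'retries'.
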
@@ -559,16 +561,15 @@ typedef enum
* States for failed commands.
*
* If the SQL/meta command fails, in CSTATE_ERROR clean up after an error:
- * - clear the conditional stack;
- * - if we have an unterminated (possibly failed) transaction block, send
- * the rollback command to the server and wait for the result in
- * CSTATE_WAIT_ROLLBACK_RESULT. If something goes wrong with rolling back,
- * go to CSTATE_ABORTED.
+ * (1) clear the conditional stack; (2) if we have an unterminated
+ * (possibly failed) transaction block, send the rollback command to the
+ * server and wait for the result in CSTATE_WAIT_ROLLBACK_RESULT. If
+ * something goes wrong with rolling back, go to CSTATE_ABORTED.
*
- * But if everything is ok we are ready for future transactions: if this is
- * a serialization or deadlock error and we can re-execute the transaction
- * from the very beginning, go to CSTATE_RETRY; otherwise go to
- * CSTATE_FAILURE.
+ * But if everything is ok we are ready for future transactions: if this
+ * is a serialization or deadlock error and we can re-execute the
+ * transaction from the very beginning, go to CSTATE_RETRY; otherwise go
+ * to CSTATE_FAILURE.
*
* In CSTATE_RETRY report an error, set the same parameters for the
* transaction execution as in the previous tries and process the first
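
The error-recovery flow described in the comment above (roll back any open transaction block, then either retry a serialization/deadlock error or record a plain failure) can be restated as a tiny decision function. This is an invented sketch, not pgbench's advanceConnectionState(); the state names are abbreviated stand-ins rather than the real CSTATE_* values.

    #include <assert.h>
    #include <stdbool.h>

    typedef enum
    {
        SKETCH_RETRY,                  /* re-execute the transaction from the start */
        SKETCH_FAILURE,                /* report the failed transaction and move on */
        SKETCH_ABORTED                 /* rollback itself went wrong: abort the client */
    } SketchNextState;

    static SketchNextState
    next_state_after_error(bool in_tx_block, bool rollback_ok,
                           bool retryable_error, bool may_retry)
    {
        /* an open (possibly failed) transaction block must be rolled back first */
        if (in_tx_block && !rollback_ok)
            return SKETCH_ABORTED;

        /* serialization/deadlock errors may be re-executed from the very beginning */
        if (retryable_error && may_retry)
            return SKETCH_RETRY;

        return SKETCH_FAILURE;
    }

    int
    main(void)
    {
        assert(next_state_after_error(true, false, true, true) == SKETCH_ABORTED);
        assert(next_state_after_error(false, true, true, true) == SKETCH_RETRY);
        assert(next_state_after_error(false, true, true, false) == SKETCH_FAILURE);
        return 0;
    }
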
@@ -622,7 +623,7 @@ typedef struct
int command; /* command number in script */
/* client variables */
- Variables variables;
+ Variables variables;
/* various times about current transaction in microseconds */
pg_time_usec_t txn_scheduled; /* scheduled start time of transaction */
@@ -633,19 +634,20 @@ typedef struct
bool prepared[MAX_SCRIPTS]; /* whether client prepared the script */
/*
- * For processing failures and repeating transactions with serialization or
- * deadlock errors:
+ * For processing failures and repeating transactions with serialization
+ * or deadlock errors:
*/
- EStatus estatus; /* the error status of the current transaction
- * execution; this is ESTATUS_NO_ERROR if there were
- * no errors */
- pg_prng_state random_state; /* random state */
- uint32 tries; /* how many times have we already tried the
+ EStatus estatus; /* the error status of the current transaction
+ * execution; this is ESTATUS_NO_ERROR if
+ * there were no errors */
+ pg_prng_state random_state; /* random state */
+ uint32 tries; /* how many times have we already tried the
* current transaction? */
/* per client collected stats */
- int64 cnt; /* client transaction count, for -t; skipped and
- * failed transactions are also counted here */
+ int64 cnt; /* client transaction count, for -t; skipped
+ * and failed transactions are also counted
+ * here */
} CState;
/*
@@ -771,7 +773,7 @@ static ParsedScript sql_script[MAX_SCRIPTS]; /* SQL script files */
static int num_scripts; /* number of scripts in sql_script[] */
static int64 total_weight = 0;
-static bool verbose_errors = false; /* print verbose messages of all errors */
+static bool verbose_errors = false; /* print verbose messages of all errors */
/* Builtin test scripts */
typedef struct BuiltinScript
@@ -3050,7 +3052,7 @@ commandError(CState *st, const char *message)
{
Assert(sql_script[st->use_file].commands[st->command]->type == SQL_COMMAND);
pg_log_info("client %d got an error in command %d (SQL) of script %d; %s",
- st->id, st->command, st->use_file, message);
+ st->id, st->command, st->use_file, message);
}
/* return a script number with a weighted choice. */
@@ -3289,8 +3291,8 @@ readCommandResponse(CState *st, MetaCommand meta, char *varprefix)
case PGRES_NONFATAL_ERROR:
case PGRES_FATAL_ERROR:
- st->estatus = getSQLErrorStatus(
- PQresultErrorField(res, PG_DIAG_SQLSTATE));
+ st->estatus = getSQLErrorStatus(PQresultErrorField(res,
+ PG_DIAG_SQLSTATE));
if (canRetryError(st->estatus))
{
if (verbose_errors)
@@ -3397,13 +3399,15 @@ doRetry(CState *st, pg_time_usec_t *now)
Assert(max_tries || latency_limit || duration > 0);
/*
- * We cannot retry the error if we have reached the maximum number of tries.
+ * We cannot retry the error if we have reached the maximum number of
+ * tries.
*/
if (max_tries && st->tries >= max_tries)
return false;
/*
- * We cannot retry the error if we spent too much time on this transaction.
+ * We cannot retry the error if we spent too much time on this
+ * transaction.
*/
if (latency_limit)
{
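
The two guards shown above amount to a simple predicate: a failed transaction is retried only while neither the --max-tries limit nor the --latency-limit budget has been exhausted. A minimal restatement, with assumed names and microsecond units (not the real doRetry()):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool
    may_retry(uint32_t tries, uint32_t max_tries,
              int64_t txn_scheduled_us, int64_t now_us, int64_t latency_limit_us)
    {
        /* cannot retry once the maximum number of tries is reached (0 = unlimited) */
        if (max_tries != 0 && tries >= max_tries)
            return false;

        /* cannot retry if we already spent too long on this transaction */
        if (latency_limit_us != 0 && now_us - txn_scheduled_us > latency_limit_us)
            return false;

        return true;
    }

    int
    main(void)
    {
        assert(!may_retry(2, 2, 0, 1000, 0));       /* out of tries */
        assert(!may_retry(1, 0, 0, 5000, 2000));    /* over the latency budget */
        assert(may_retry(1, 2, 0, 1000, 2000));     /* still allowed */
        return 0;
    }
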
@@ -3432,14 +3436,15 @@ discardUntilSync(CState *st)
if (!PQpipelineSync(st->con))
{
pg_log_error("client %d aborted: failed to send a pipeline sync",
- st->id);
+ st->id);
return 0;
}
/* receive PGRES_PIPELINE_SYNC and null following it */
- for(;;)
+ for (;;)
{
- PGresult *res = PQgetResult(st->con);
+ PGresult *res = PQgetResult(st->con);
+
if (PQresultStatus(res) == PGRES_PIPELINE_SYNC)
{
PQclear(res);
@@ -3484,9 +3489,10 @@ getTransactionStatus(PGconn *con)
/* fall through */
case PQTRANS_ACTIVE:
default:
+
/*
- * We cannot find out whether we are in a transaction block or not.
- * Internal error which should never occur.
+ * We cannot find out whether we are in a transaction block or
+ * not. Internal error which should never occur.
*/
pg_log_error("unexpected transaction status %d", tx_status);
return TSTATUS_OTHER_ERROR;
@@ -3513,8 +3519,8 @@ printVerboseErrorMessages(CState *st, pg_time_usec_t *now, bool is_retry)
printfPQExpBuffer(buf, "client %d ", st->id);
appendPQExpBuffer(buf, "%s",
(is_retry ?
- "repeats the transaction after the error" :
- "ends the failed transaction"));
+ "repeats the transaction after the error" :
+ "ends the failed transaction"));
appendPQExpBuffer(buf, " (try %u", st->tries);
/* Print max_tries if it is not unlimitted. */
@@ -3522,8 +3528,8 @@ printVerboseErrorMessages(CState *st, pg_time_usec_t *now, bool is_retry)
appendPQExpBuffer(buf, "/%u", max_tries);
/*
- * If the latency limit is used, print a percentage of the current transaction
- * latency from the latency limit.
+ * If the latency limit is used, print a percentage of the current
+ * transaction latency from the latency limit.
*/
if (latency_limit)
{
@@ -3619,8 +3625,8 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
/*
* It is the first try to run this transaction. Remember the
- * random state: maybe it will get an error and we will need to
- * run it again.
+ * random state: maybe it will get an error and we will need
+ * to run it again.
*/
st->random_state = st->cs_func_rs;
@@ -3998,8 +4004,8 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
}
/*
- * Check if we have a (failed) transaction block or not, and
- * roll it back if any.
+ * Check if we have a (failed) transaction block or not,
+ * and roll it back if any.
*/
tstatus = getTransactionStatus(st->con);
if (tstatus == TSTATUS_IN_BLOCK)
@@ -4017,9 +4023,9 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
else if (tstatus == TSTATUS_IDLE)
{
/*
- * If time is over, we're done;
- * otherwise, check if we can retry the error.
- */
+ * If time is over, we're done; otherwise, check if we
+ * can retry the error.
+ */
st->state = timer_exceeded ? CSTATE_FINISHED :
doRetry(st, &now) ? CSTATE_RETRY : CSTATE_FAILURE;
}
@@ -4039,7 +4045,7 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
*/
case CSTATE_WAIT_ROLLBACK_RESULT:
{
- PGresult *res;
+ PGresult *res;
pg_log_debug("client %d receiving", st->id);
if (!PQconsumeInput(st->con))
@@ -4050,7 +4056,7 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
break;
}
if (PQisBusy(st->con))
- return; /* don't have the whole result yet */
+ return; /* don't have the whole result yet */
/*
* Read and discard the query result;
@@ -4066,8 +4072,8 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
Assert(res == NULL);
/*
- * If time is over, we're done;
- * otherwise, check if we can retry the error.
+ * If time is over, we're done; otherwise, check
+ * if we can retry the error.
*/
st->state = timer_exceeded ? CSTATE_FINISHED :
doRetry(st, &now) ? CSTATE_RETRY : CSTATE_FAILURE;
@@ -4089,7 +4095,8 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
command = sql_script[st->use_file].commands[st->command];
/*
- * Inform that the transaction will be retried after the error.
+ * Inform that the transaction will be retried after the
+ * error.
*/
if (verbose_errors)
printVerboseErrorMessages(st, &now, true);
@@ -4099,8 +4106,8 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
command->retries++;
/*
- * Reset the random state as they were at the beginning
- * of the transaction.
+ * Reset the random state as they were at the beginning of the
+ * transaction.
*/
st->cs_func_rs = st->random_state;
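
The comments above describe how retries are made reproducible: the client's random state is saved before the first try and restored before a retry, so the retried transaction sees the same random values. A minimal standalone sketch of that save/restore idea, using a toy generator (arbitrary constants) instead of pg_prng:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* toy stand-in for pg_prng_state; a simple 64-bit LCG for illustration */
    typedef struct
    {
        uint64_t s;
    } toy_prng_state;

    static uint64_t
    toy_prng_next(toy_prng_state *state)
    {
        state->s = state->s * 6364136223846793005ULL + 1442695040888963407ULL;
        return state->s;
    }

    int
    main(void)
    {
        toy_prng_state rs = {.s = 42};
        toy_prng_state saved;
        uint64_t first_try[3];
        uint64_t retry[3];

        /* first try: remember the state before drawing any random values */
        saved = rs;
        for (int i = 0; i < 3; i++)
            first_try[i] = toy_prng_next(&rs);

        /* retry after an error: restore the state, so the same values come back */
        rs = saved;
        for (int i = 0; i < 3; i++)
            retry[i] = toy_prng_next(&rs);

        for (int i = 0; i < 3; i++)
            assert(first_try[i] == retry[i]);

        printf("retry reproduced the same random sequence\n");
        return 0;
    }
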
@@ -4188,8 +4195,9 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
st->state = CSTATE_CHOOSE_SCRIPT;
/*
- * Ensure that we always return on this point, so as to avoid
- * an infinite loop if the script only contains meta commands.
+ * Ensure that we always return on this point, so as to
+ * avoid an infinite loop if the script only contains meta
+ * commands.
*/
return;
}
@@ -4518,10 +4526,10 @@ doLog(TState *thread, CState *st,
lag_max = agg->lag.max;
}
fprintf(logfile, " %.0f %.0f %.0f %.0f",
- lag_sum,
- lag_sum2,
- lag_min,
- lag_max);
+ lag_sum,
+ lag_sum2,
+ lag_min,
+ lag_max);
if (latency_limit)
skipped = agg->skipped;
@@ -4588,7 +4596,7 @@ processXactStats(TState *thread, CState *st, pg_time_usec_t *now,
double latency = 0.0,
lag = 0.0;
bool detailed = progress || throttle_delay || latency_limit ||
- use_log || per_script_stats;
+ use_log || per_script_stats;
if (detailed && !skipped && st->estatus == ESTATUS_NO_ERROR)
{
@@ -4838,7 +4846,7 @@ initGenerateDataClientSide(PGconn *con)
PGresult *res;
int i;
int64 k;
- char *copy_statement;
+ char *copy_statement;
/* used to track elapsed time and estimate of the remaining time */
pg_time_usec_t start;
@@ -6365,7 +6373,7 @@ printResults(StatsData *total,
StatsData *sstats = &sql_script[i].stats;
int64 script_failures = getFailures(sstats);
int64 script_total_cnt =
- sstats->cnt + sstats->skipped + script_failures;
+ sstats->cnt + sstats->skipped + script_failures;
printf("SQL script %d: %s\n"
" - weight: %d (targets %.1f%% of total)\n"
diff --git a/src/bin/pgbench/t/001_pgbench_with_server.pl b/src/bin/pgbench/t/001_pgbench_with_server.pl
index ca71f968dc4..2c0dc369652 100644
--- a/src/bin/pgbench/t/001_pgbench_with_server.pl
+++ b/src/bin/pgbench/t/001_pgbench_with_server.pl
@@ -1202,17 +1202,21 @@ check_pgbench_logs($bdir, '001_pgbench_log_3', 1, 10, 10,
# abortion of the client if the script contains an incomplete transaction block
$node->pgbench(
- '--no-vacuum', 2, [ qr{processed: 1/10} ],
- [ qr{client 0 aborted: end of script reached without completing the last transaction} ],
+ '--no-vacuum',
+ 2,
+ [qr{processed: 1/10}],
+ [
+ qr{client 0 aborted: end of script reached without completing the last transaction}
+ ],
'incomplete transaction block',
{ '001_pgbench_incomplete_transaction_block' => q{BEGIN;SELECT 1;} });
# Test the concurrent update in the table row and deadlocks.
$node->safe_psql('postgres',
- 'CREATE UNLOGGED TABLE first_client_table (value integer); '
- . 'CREATE UNLOGGED TABLE xy (x integer, y integer); '
- . 'INSERT INTO xy VALUES (1, 2);');
+ 'CREATE UNLOGGED TABLE first_client_table (value integer); '
+ . 'CREATE UNLOGGED TABLE xy (x integer, y integer); '
+ . 'INSERT INTO xy VALUES (1, 2);');
# Serialization error and retry
@@ -1221,7 +1225,7 @@ local $ENV{PGOPTIONS} = "-c default_transaction_isolation=repeatable\\ read";
# Check that we have a serialization error and the same random value of the
# delta variable in the next try
my $err_pattern =
- "(client (0|1) sending UPDATE xy SET y = y \\+ -?\\d+\\b).*"
+ "(client (0|1) sending UPDATE xy SET y = y \\+ -?\\d+\\b).*"
. "client \\2 got an error in command 3 \\(SQL\\) of script 0; "
. "ERROR: could not serialize access due to concurrent update\\b.*"
. "\\1";
@@ -1229,9 +1233,12 @@ my $err_pattern =
$node->pgbench(
"-n -c 2 -t 1 -d --verbose-errors --max-tries 2",
0,
- [ qr{processed: 2/2\b}, qr{number of transactions retried: 1\b},
- qr{total number of retries: 1\b} ],
- [ qr/$err_pattern/s ],
+ [
+ qr{processed: 2/2\b},
+ qr{number of transactions retried: 1\b},
+ qr{total number of retries: 1\b}
+ ],
+ [qr/$err_pattern/s],
'concurrent update with retrying',
{
'001_pgbench_serialization' => q{
@@ -1304,15 +1311,18 @@ local $ENV{PGOPTIONS} = "-c default_transaction_isolation=read\\ committed";
# Check that we have a deadlock error
$err_pattern =
- "client (0|1) got an error in command (3|5) \\(SQL\\) of script 0; "
+ "client (0|1) got an error in command (3|5) \\(SQL\\) of script 0; "
. "ERROR: deadlock detected\\b";
$node->pgbench(
"-n -c 2 -t 1 --max-tries 2 --verbose-errors",
0,
- [ qr{processed: 2/2\b}, qr{number of transactions retried: 1\b},
- qr{total number of retries: 1\b} ],
- [ qr{$err_pattern} ],
+ [
+ qr{processed: 2/2\b},
+ qr{number of transactions retried: 1\b},
+ qr{total number of retries: 1\b}
+ ],
+ [qr{$err_pattern}],
'deadlock with retrying',
{
'001_pgbench_deadlock' => q{
diff --git a/src/bin/pgbench/t/002_pgbench_no_server.pl b/src/bin/pgbench/t/002_pgbench_no_server.pl
index a5074c70d9d..50bde7dd0fc 100644
--- a/src/bin/pgbench/t/002_pgbench_no_server.pl
+++ b/src/bin/pgbench/t/002_pgbench_no_server.pl
@@ -37,7 +37,7 @@ sub pgbench_scripts
local $Test::Builder::Level = $Test::Builder::Level + 1;
my ($opts, $stat, $out, $err, $name, $files) = @_;
- my @cmd = ('pgbench', split /\s+/, $opts);
+ my @cmd = ('pgbench', split /\s+/, $opts);
my @filenames = ();
if (defined $files)
{
@@ -196,7 +196,9 @@ my @options = (
[
'an infinite number of tries',
'--max-tries 0',
- [qr{an unlimited number of transaction tries can only be used with --latency-limit or a duration}]
+ [
+ qr{an unlimited number of transaction tries can only be used with --latency-limit or a duration}
+ ]
],
# logging sub-options
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index feb1d547d4d..9b140badeb9 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -32,8 +32,8 @@
static bool DescribeQuery(const char *query, double *elapsed_msec);
static bool ExecQueryUsingCursor(const char *query, double *elapsed_msec);
-static int ExecQueryAndProcessResults(const char *query, double *elapsed_msec, bool *svpt_gone_p,
- bool is_watch, const printQueryOpt *opt, FILE *printQueryFout);
+static int ExecQueryAndProcessResults(const char *query, double *elapsed_msec, bool *svpt_gone_p,
+ bool is_watch, const printQueryOpt *opt, FILE *printQueryFout);
static bool command_no_begin(const char *query);
static bool is_select_command(const char *query);
@@ -482,7 +482,7 @@ ClearOrSaveResult(PGresult *result)
static void
ClearOrSaveAllResults(void)
{
- PGresult *result;
+ PGresult *result;
while ((result = PQgetResult(pset.db)) != NULL)
ClearOrSaveResult(result);
@@ -697,7 +697,8 @@ PrintQueryTuples(const PGresult *result, const printQueryOpt *opt, FILE *printQu
}
else
{
- FILE *fout = printQueryFout ? printQueryFout : pset.queryFout;
+ FILE *fout = printQueryFout ? printQueryFout : pset.queryFout;
+
printQuery(result, opt ? opt : &pset.popt, fout, false, pset.logfile);
if (ferror(fout))
{
@@ -907,9 +908,9 @@ HandleCopyResult(PGresult **resultp)
&& (copystream != NULL);
/*
- * Suppress status printing if the report would go to the same
- * place as the COPY data just went. Note this doesn't
- * prevent error reporting, since handleCopyOut did that.
+ * Suppress status printing if the report would go to the same place
+ * as the COPY data just went. Note this doesn't prevent error
+ * reporting, since handleCopyOut did that.
*/
if (copystream == pset.queryFout)
{
@@ -943,8 +944,8 @@ HandleCopyResult(PGresult **resultp)
ResetCancelConn();
/*
- * Replace the PGRES_COPY_OUT/IN result with COPY command's exit
- * status, or with NULL if we want to suppress printing anything.
+ * Replace the PGRES_COPY_OUT/IN result with COPY command's exit status,
+ * or with NULL if we want to suppress printing anything.
*/
PQclear(*resultp);
*resultp = copy_result;
@@ -1069,7 +1070,7 @@ PrintQueryResult(PGresult *result, bool last, bool is_watch, const printQueryOpt
*/
struct t_notice_messages
{
- PQExpBufferData messages[2];
+ PQExpBufferData messages[2];
int current;
};
@@ -1080,6 +1081,7 @@ static void
AppendNoticeMessage(void *arg, const char *msg)
{
struct t_notice_messages *notices = arg;
+
appendPQExpBufferStr(&notices->messages[notices->current], msg);
}
@@ -1089,7 +1091,8 @@ AppendNoticeMessage(void *arg, const char *msg)
static void
ShowNoticeMessage(struct t_notice_messages *notices)
{
- PQExpBufferData *current = &notices->messages[notices->current];
+ PQExpBufferData *current = &notices->messages[notices->current];
+
if (*current->data != '\0')
pg_log_info("%s", current->data);
resetPQExpBuffer(current);
@@ -1234,6 +1237,7 @@ SendQuery(const char *query)
break;
case PQTRANS_INTRANS:
+
/*
* Release our savepoint, but do nothing if they are messing
* with savepoints themselves
@@ -1472,7 +1476,7 @@ DescribeQuery(const char *query, double *elapsed_msec)
*/
static int
ExecQueryAndProcessResults(const char *query, double *elapsed_msec, bool *svpt_gone_p,
- bool is_watch, const printQueryOpt *opt, FILE *printQueryFout)
+ bool is_watch, const printQueryOpt *opt, FILE *printQueryFout)
{
bool timing = pset.timing;
bool success;
@@ -1527,8 +1531,8 @@ ExecQueryAndProcessResults(const char *query, double *elapsed_msec, bool *svpt_g
if (!AcceptResult(result, false))
{
/*
- * Some error occured, either a server-side failure or
- * a failure to submit the command string. Record that.
+ * Some error occured, either a server-side failure or a failure
+ * to submit the command string. Record that.
*/
const char *error = PQresultErrorMessage(result);
@@ -1551,10 +1555,12 @@ ExecQueryAndProcessResults(const char *query, double *elapsed_msec, bool *svpt_g
if (result_status == PGRES_COPY_BOTH ||
result_status == PGRES_COPY_OUT ||
result_status == PGRES_COPY_IN)
+
/*
- * For some obscure reason PQgetResult does *not* return a NULL in copy
- * cases despite the result having been cleared, but keeps returning an
- * "empty" result that we have to ignore manually.
+ * For some obscure reason PQgetResult does *not* return a
+ * NULL in copy cases despite the result having been cleared,
+ * but keeps returning an "empty" result that we have to
+ * ignore manually.
*/
result = NULL;
else
@@ -1565,12 +1571,13 @@ ExecQueryAndProcessResults(const char *query, double *elapsed_msec, bool *svpt_g
else if (svpt_gone_p && !*svpt_gone_p)
{
/*
- * Check if the user ran any command that would destroy our internal
- * savepoint: If the user did COMMIT AND CHAIN, RELEASE or ROLLBACK, our
- * savepoint is gone. If they issued a SAVEPOINT, releasing ours would
- * remove theirs.
+ * Check if the user ran any command that would destroy our
+ * internal savepoint: If the user did COMMIT AND CHAIN, RELEASE
+ * or ROLLBACK, our savepoint is gone. If they issued a SAVEPOINT,
+ * releasing ours would remove theirs.
*/
const char *cmd = PQcmdStatus(result);
+
*svpt_gone_p = (strcmp(cmd, "COMMIT") == 0 ||
strcmp(cmd, "SAVEPOINT") == 0 ||
strcmp(cmd, "RELEASE") == 0 ||
@@ -1614,11 +1621,11 @@ ExecQueryAndProcessResults(const char *query, double *elapsed_msec, bool *svpt_g
/*
* Get timing measure before printing the last result.
*
- * It will include the display of previous results, if any.
- * This cannot be helped because the server goes on processing
- * further queries anyway while the previous ones are being displayed.
- * The parallel execution of the client display hides the server time
- * when it is shorter.
+ * It will include the display of previous results, if any. This
+ * cannot be helped because the server goes on processing further
+ * queries anyway while the previous ones are being displayed. The
+ * parallel execution of the client display hides the server time when
+ * it is shorter.
*
* With combined queries, timing must be understood as an upper bound
* of the time spent processing them.
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index 31df8b759cd..1a5d924a23f 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -109,9 +109,9 @@ describeAggregates(const char *pattern, bool verbose, bool showSystem)
" AND n.nspname <> 'information_schema'\n");
if (!validateSQLNamePattern(&buf, pattern, true, false,
- "n.nspname", "p.proname", NULL,
- "pg_catalog.pg_function_is_visible(p.oid)",
- NULL, 3))
+ "n.nspname", "p.proname", NULL,
+ "pg_catalog.pg_function_is_visible(p.oid)",
+ NULL, 3))
return false;
appendPQExpBufferStr(&buf, "ORDER BY 1, 2, 4;");
@@ -6002,7 +6002,7 @@ validateSQLNamePattern(PQExpBuffer buf, const char *pattern, bool have_where,
const char *visibilityrule, bool *added_clause,
int maxparts)
{
- PQExpBufferData dbbuf;
+ PQExpBufferData dbbuf;
int dotcnt;
bool added;
@@ -6021,7 +6021,7 @@ validateSQLNamePattern(PQExpBuffer buf, const char *pattern, bool have_where,
return false;
}
- if (maxparts > 1 && dotcnt == maxparts-1)
+ if (maxparts > 1 && dotcnt == maxparts - 1)
{
if (PQdb(pset.db) == NULL)
{
diff --git a/src/bin/psql/t/001_basic.pl b/src/bin/psql/t/001_basic.pl
index 98996d9a379..90e69d7cdba 100644
--- a/src/bin/psql/t/001_basic.pl
+++ b/src/bin/psql/t/001_basic.pl
@@ -36,9 +36,8 @@ sub psql_fails_like
my ($node, $sql, $expected_stderr, $test_name) = @_;
# Use the context of a WAL sender, some of the tests rely on that.
- my ($ret, $stdout, $stderr) = $node->psql(
- 'postgres', $sql,
- replication => 'database');
+ my ($ret, $stdout, $stderr) =
+ $node->psql('postgres', $sql, replication => 'database');
isnt($ret, 0, "$test_name: exit code not 0");
like($stderr, $expected_stderr, "$test_name: matches");
@@ -69,9 +68,9 @@ max_wal_senders = 4
});
$node->start;
-psql_like($node, '\copyright', qr/Copyright/, '\copyright');
-psql_like($node, '\help', qr/ALTER/, '\help without arguments');
-psql_like($node, '\help SELECT', qr/SELECT/, '\help with argument');
+psql_like($node, '\copyright', qr/Copyright/, '\copyright');
+psql_like($node, '\help', qr/ALTER/, '\help without arguments');
+psql_like($node, '\help SELECT', qr/SELECT/, '\help with argument');
# Test clean handling of unsupported replication command responses
psql_fails_like(
@@ -116,16 +115,16 @@ NOTIFY foo, 'bar';",
'notification with payload');
# test behavior and output on server crash
-my ($ret, $out, $err) = $node->psql(
- 'postgres',
- "SELECT 'before' AS running;\n" .
- "SELECT pg_terminate_backend(pg_backend_pid());\n" .
- "SELECT 'AFTER' AS not_running;\n");
+my ($ret, $out, $err) = $node->psql('postgres',
+ "SELECT 'before' AS running;\n"
+ . "SELECT pg_terminate_backend(pg_backend_pid());\n"
+ . "SELECT 'AFTER' AS not_running;\n");
is($ret, 2, 'server crash: psql exit code');
like($out, qr/before/, 'server crash: output before crash');
ok($out !~ qr/AFTER/, 'server crash: no output after crash');
-is($err, 'psql:<stdin>:2: FATAL: terminating connection due to administrator command
+is( $err,
+ 'psql:<stdin>:2: FATAL: terminating connection due to administrator command
psql:<stdin>:2: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -149,34 +148,46 @@ psql_like(
# \errverbose: The normal way, using a cursor by setting FETCH_COUNT,
# and using \gdesc. Test them all.
-like(($node->psql('postgres', "SELECT error;\n\\errverbose", on_error_stop => 0))[2],
- qr/\A^psql:<stdin>:1: ERROR: .*$
+like(
+ ( $node->psql(
+ 'postgres',
+ "SELECT error;\n\\errverbose",
+ on_error_stop => 0))[2],
+ qr/\A^psql:<stdin>:1: ERROR: .*$
^LINE 1: SELECT error;$
^ *^.*$
^psql:<stdin>:2: error: ERROR: [0-9A-Z]{5}: .*$
^LINE 1: SELECT error;$
^ *^.*$
^LOCATION: .*$/m,
- '\errverbose after normal query with error');
-
-like(($node->psql('postgres', "\\set FETCH_COUNT 1\nSELECT error;\n\\errverbose", on_error_stop => 0))[2],
- qr/\A^psql:<stdin>:2: ERROR: .*$
+ '\errverbose after normal query with error');
+
+like(
+ ( $node->psql(
+ 'postgres',
+ "\\set FETCH_COUNT 1\nSELECT error;\n\\errverbose",
+ on_error_stop => 0))[2],
+ qr/\A^psql:<stdin>:2: ERROR: .*$
^LINE 2: SELECT error;$
^ *^.*$
^psql:<stdin>:3: error: ERROR: [0-9A-Z]{5}: .*$
^LINE 2: SELECT error;$
^ *^.*$
^LOCATION: .*$/m,
- '\errverbose after FETCH_COUNT query with error');
-
-like(($node->psql('postgres', "SELECT error\\gdesc\n\\errverbose", on_error_stop => 0))[2],
- qr/\A^psql:<stdin>:1: ERROR: .*$
+ '\errverbose after FETCH_COUNT query with error');
+
+like(
+ ( $node->psql(
+ 'postgres',
+ "SELECT error\\gdesc\n\\errverbose",
+ on_error_stop => 0))[2],
+ qr/\A^psql:<stdin>:1: ERROR: .*$
^LINE 1: SELECT error$
^ *^.*$
^psql:<stdin>:2: error: ERROR: [0-9A-Z]{5}: .*$
^LINE 1: SELECT error$
^ *^.*$
^LOCATION: .*$/m,
- '\errverbose after \gdesc with error');
+ '\errverbose after \gdesc with error');
done_testing();
diff --git a/src/bin/psql/t/010_tab_completion.pl b/src/bin/psql/t/010_tab_completion.pl
index 2711935a2cc..2eea515e871 100644
--- a/src/bin/psql/t/010_tab_completion.pl
+++ b/src/bin/psql/t/010_tab_completion.pl
@@ -212,10 +212,7 @@ check_completion(
clear_line();
# check case folding
-check_completion(
- "select * from TAB\t",
- qr/tab1 /,
- "automatically fold case");
+check_completion("select * from TAB\t", qr/tab1 /, "automatically fold case");
clear_query();
@@ -228,15 +225,10 @@ check_completion("\\DRD\t", qr/drds /, "complete \\DRD<tab> to \\drds");
clear_line();
# check completion of a schema-qualified name
-check_completion(
- "select * from pub\t",
- qr/public\./,
- "complete schema when relevant");
+check_completion("select * from pub\t",
+ qr/public\./, "complete schema when relevant");
-check_completion(
- "tab\t",
- qr/tab1 /,
- "complete schema-qualified name");
+check_completion("tab\t", qr/tab1 /, "complete schema-qualified name");
clear_query();
@@ -339,15 +331,10 @@ check_completion(
clear_line();
# check timezone name completion
-check_completion(
- "SET timezone TO am\t",
- qr|'America/|,
- "offer partial timezone name");
+check_completion("SET timezone TO am\t",
+ qr|'America/|, "offer partial timezone name");
-check_completion(
- "new_\t",
- qr|New_York|,
- "complete partial timezone name");
+check_completion("new_\t", qr|New_York|, "complete partial timezone name");
clear_line();
diff --git a/src/bin/psql/t/020_cancel.pl b/src/bin/psql/t/020_cancel.pl
index d57d3429521..f4dbd36c391 100644
--- a/src/bin/psql/t/020_cancel.pl
+++ b/src/bin/psql/t/020_cancel.pl
@@ -21,7 +21,8 @@ $node->start;
# the process from IPC::Run. As a workaround, we have psql print its
# own PID (which is the parent of the shell launched by psql) to a
# file.
-SKIP: {
+SKIP:
+{
skip "cancel test requires a Unix shell", 2 if $windows_os;
local %ENV = $node->_get_env();
@@ -31,31 +32,38 @@ SKIP: {
# Test whether shell supports $PPID. It's part of POSIX, but some
# pre-/non-POSIX shells don't support it (e.g., NetBSD).
$stdin = "\\! echo \$PPID";
- IPC::Run::run(['psql', '-X', '-v', 'ON_ERROR_STOP=1'], '<', \$stdin, '>', \$stdout, '2>', \$stderr);
+ IPC::Run::run([ 'psql', '-X', '-v', 'ON_ERROR_STOP=1' ],
+ '<', \$stdin, '>', \$stdout, '2>', \$stderr);
$stdout =~ /^\d+$/ or skip "shell apparently does not support \$PPID", 2;
# Now start the real test
- my $h = IPC::Run::start(['psql', '-X', '-v', 'ON_ERROR_STOP=1'], \$stdin, \$stdout, \$stderr);
+ my $h = IPC::Run::start([ 'psql', '-X', '-v', 'ON_ERROR_STOP=1' ],
+ \$stdin, \$stdout, \$stderr);
# Get the PID
$stdout = '';
$stderr = '';
- $stdin = "\\! echo \$PPID >$tempdir/psql.pid\n";
+ $stdin = "\\! echo \$PPID >$tempdir/psql.pid\n";
pump $h while length $stdin;
my $count;
my $psql_pid;
- until (-s "$tempdir/psql.pid" and ($psql_pid = PostgreSQL::Test::Utils::slurp_file("$tempdir/psql.pid")) =~ /^\d+\n/s)
+ until (
+ -s "$tempdir/psql.pid"
+ and ($psql_pid =
+ PostgreSQL::Test::Utils::slurp_file("$tempdir/psql.pid")) =~
+ /^\d+\n/s)
{
($count++ < 100 * $PostgreSQL::Test::Utils::timeout_default)
or die "pid file did not appear";
- usleep(10_000)
+ usleep(10_000);
}
# Send sleep command and wait until the server has registered it
$stdin = "select pg_sleep($PostgreSQL::Test::Utils::timeout_default);\n";
pump $h while length $stdin;
- $node->poll_query_until('postgres', q{SELECT (SELECT count(*) FROM pg_stat_activity WHERE query ~ '^select pg_sleep') > 0;})
- or die "timed out";
+ $node->poll_query_until('postgres',
+ q{SELECT (SELECT count(*) FROM pg_stat_activity WHERE query ~ '^select pg_sleep') > 0;}
+ ) or die "timed out";
# Send cancel request
kill 'INT', $psql_pid;
@@ -63,7 +71,10 @@ SKIP: {
my $result = finish $h;
ok(!$result, 'query failed as expected');
- like($stderr, qr/canceling statement due to user request/, 'query was canceled');
+ like(
+ $stderr,
+ qr/canceling statement due to user request/,
+ 'query was canceled');
}
done_testing();
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index 588c0841fee..55af9eb04e4 100644
--- a/src/bin/psql/tab-complete.c
+++ b/src/bin/psql/tab-complete.c
@@ -826,7 +826,7 @@ static const SchemaQuery Query_for_list_of_mergetargets = {
.selcondition =
"c.relkind IN (" CppAsString2(RELKIND_RELATION) ", "
CppAsString2(RELKIND_PARTITIONED_TABLE) ") ",
- .viscondition = "pg_catalog.pg_table_is_visible(c.oid)",
+ .viscondition = "pg_catalog.pg_table_is_visible(c.oid)",
.namespace = "c.relnamespace",
.result = "c.relname",
};
@@ -1827,6 +1827,7 @@ psql_completion(const char *text, int start, int end)
(HeadMatches("ALTER", "PUBLICATION", MatchAny, "ADD|SET", "TABLE") &&
ends_with(prev_wd, ',')))
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables);
+
/*
* "ALTER PUBLICATION <name> SET TABLE <name> WHERE (" - complete with
* table attributes
diff --git a/src/bin/scripts/t/020_createdb.pl b/src/bin/scripts/t/020_createdb.pl
index 18f6e313d57..78733f64d25 100644
--- a/src/bin/scripts/t/020_createdb.pl
+++ b/src/bin/scripts/t/020_createdb.pl
@@ -35,12 +35,19 @@ if ($ENV{with_icu} eq 'yes')
'create database with ICU fails without ICU locale specified');
$node->issues_sql_like(
- [ 'createdb', '-T', 'template0', '--locale-provider=icu', '--icu-locale=en', 'foobar5' ],
+ [
+ 'createdb', '-T',
+ 'template0', '--locale-provider=icu',
+ '--icu-locale=en', 'foobar5'
+ ],
qr/statement: CREATE DATABASE foobar5 .* LOCALE_PROVIDER icu ICU_LOCALE 'en'/,
'create database with ICU locale specified');
$node->command_fails(
- [ 'createdb', '-T', 'template0', '--locale-provider=icu', '--icu-locale=@colNumeric=lower', 'foobarX' ],
+ [
+ 'createdb', '-T', 'template0', '--locale-provider=icu',
+ '--icu-locale=@colNumeric=lower', 'foobarX'
+ ],
'fails for invalid ICU locale');
}
else
@@ -53,7 +60,8 @@ else
$node->command_fails([ 'createdb', 'foobar1' ],
'fails if database already exists');
-$node->command_fails([ 'createdb', '-T', 'template0', '--locale-provider=xyz', 'foobarX' ],
+$node->command_fails(
+ [ 'createdb', '-T', 'template0', '--locale-provider=xyz', 'foobarX' ],
'fails for invalid locale provider');
# Check use of templates with shared dependencies copied from the template.
diff --git a/src/common/compression.c b/src/common/compression.c
index 632fae61442..da3c291c0ff 100644
--- a/src/common/compression.c
+++ b/src/common/compression.c
@@ -72,7 +72,7 @@ get_compress_algorithm_name(pg_compress_algorithm algorithm)
/* no default, to provoke compiler warnings if values are added */
}
Assert(false);
- return "???"; /* placate compiler */
+ return "???"; /* placate compiler */
}
/*
@@ -93,7 +93,7 @@ get_compress_algorithm_name(pg_compress_algorithm algorithm)
*/
void
parse_compress_specification(pg_compress_algorithm algorithm, char *specification,
- pg_compress_specification *result)
+ pg_compress_specification *result)
{
int bare_level;
char *bare_level_endp;
diff --git a/src/common/cryptohash_openssl.c b/src/common/cryptohash_openssl.c
index 8e76ffdee99..2fb29b1f855 100644
--- a/src/common/cryptohash_openssl.c
+++ b/src/common/cryptohash_openssl.c
@@ -185,6 +185,7 @@ pg_cryptohash_init(pg_cryptohash_ctx *ctx)
{
ctx->errreason = SSLerrmessage(ERR_get_error());
ctx->error = PG_CRYPTOHASH_ERROR_OPENSSL;
+
/*
* The OpenSSL error queue should normally be empty since we've
* consumed an error, but cipher initialization can in FIPS-enabled
diff --git a/src/common/exec.c b/src/common/exec.c
index 289b1f26b86..9da588daf91 100644
--- a/src/common/exec.c
+++ b/src/common/exec.c
@@ -35,7 +35,7 @@
/* Inhibit mingw CRT's auto-globbing of command line arguments */
#if defined(WIN32) && !defined(_MSC_VER)
-extern int _CRT_glob = 0; /* 0 turns off globbing; 1 turns it on */
+extern int _CRT_glob = 0; /* 0 turns off globbing; 1 turns it on */
#endif
/*
diff --git a/src/include/access/amapi.h b/src/include/access/amapi.h
index a382551a981..0b89f399f08 100644
--- a/src/include/access/amapi.h
+++ b/src/include/access/amapi.h
@@ -245,7 +245,7 @@ typedef struct IndexAmRoutine
/* does AM use maintenance_work_mem? */
bool amusemaintenanceworkmem;
/* does AM block HOT update? */
- bool amhotblocking;
+ bool amhotblocking;
/* OR of parallel vacuum flags. See vacuum.h for flags. */
uint8 amparallelvacuumoptions;
/* type of data stored in index, or InvalidOid if variable */
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 5d7f7fd800e..abf62d9df79 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -188,7 +188,7 @@ extern int heap_page_prune(Relation relation, Buffer buffer,
struct GlobalVisState *vistest,
TransactionId old_snap_xmin,
TimestampTz old_snap_ts_ts,
- int *nnewlpdead,
+ int *nnewlpdead,
OffsetNumber *off_loc);
extern void heap_page_prune_execute(Buffer buffer,
OffsetNumber *redirected, int nredirected,
diff --git a/src/include/access/rmgr.h b/src/include/access/rmgr.h
index e465800e445..3b6a497e1b4 100644
--- a/src/include/access/rmgr.h
+++ b/src/include/access/rmgr.h
@@ -26,7 +26,7 @@ typedef enum RmgrIds
{
#include "access/rmgrlist.h"
RM_NEXT_ID
-} RmgrIds;
+} RmgrIds;
#undef PG_RMGR
diff --git a/src/include/access/xact.h b/src/include/access/xact.h
index 837fe7de0b9..4794941df31 100644
--- a/src/include/access/xact.h
+++ b/src/include/access/xact.h
@@ -231,9 +231,10 @@ typedef struct xl_xact_assignment
typedef struct xl_xact_xinfo
{
/*
- * Even though we right now only require two bytes of space in xinfo we use
- * four so following records don't have to care about alignment. Commit
- * records can be large, so copying large portions isn't attractive.
+ * Even though we right now only require two bytes of space in xinfo we
+ * use four so following records don't have to care about alignment.
+ * Commit records can be large, so copying large portions isn't
+ * attractive.
*/
uint32 xinfo;
} xl_xact_xinfo;
@@ -274,7 +275,7 @@ typedef struct xl_xact_stats_item
typedef struct xl_xact_stats_items
{
- int nitems;
+ int nitems;
xl_xact_stats_item items[FLEXIBLE_ARRAY_MEMBER];
} xl_xact_stats_items;
#define MinSizeOfXactStatsItems offsetof(xl_xact_stats_items, items)
@@ -378,7 +379,7 @@ typedef struct xl_xact_parsed_commit
char twophase_gid[GIDSIZE]; /* only for 2PC */
int nabortrels; /* only for 2PC */
RelFileNode *abortnodes; /* only for 2PC */
- int nabortstats; /* only for 2PC */
+ int nabortstats; /* only for 2PC */
xl_xact_stats_item *abortstats; /* only for 2PC */
XLogRecPtr origin_lsn;
diff --git a/src/include/access/xlogstats.h b/src/include/access/xlogstats.h
index be59eece225..7eb4370f2d5 100644
--- a/src/include/access/xlogstats.h
+++ b/src/include/access/xlogstats.h
@@ -20,20 +20,20 @@
typedef struct XLogRecStats
{
- uint64 count;
- uint64 rec_len;
- uint64 fpi_len;
+ uint64 count;
+ uint64 rec_len;
+ uint64 fpi_len;
} XLogRecStats;
typedef struct XLogStats
{
- uint64 count;
+ uint64 count;
#ifdef FRONTEND
XLogRecPtr startptr;
XLogRecPtr endptr;
#endif
- XLogRecStats rmgr_stats[RM_MAX_ID + 1];
- XLogRecStats record_stats[RM_MAX_ID + 1][MAX_XLINFO_TYPES];
+ XLogRecStats rmgr_stats[RM_MAX_ID + 1];
+ XLogRecStats record_stats[RM_MAX_ID + 1][MAX_XLINFO_TYPES];
} XLogStats;
extern void XLogRecGetLen(XLogReaderState *record, uint32 *rec_len,
diff --git a/src/include/access/xlogutils.h b/src/include/access/xlogutils.h
index 5fcbbc136f9..c9d0b75a01b 100644
--- a/src/include/access/xlogutils.h
+++ b/src/include/access/xlogutils.h
@@ -78,7 +78,7 @@ typedef enum
/* Private data of the read_local_xlog_page_no_wait callback. */
typedef struct ReadLocalXLogPageNoWaitPrivate
{
- bool end_of_wal; /* true, when end of WAL is reached */
+ bool end_of_wal; /* true, when end of WAL is reached */
} ReadLocalXLogPageNoWaitPrivate;
extern XLogRedoAction XLogReadBufferForRedo(XLogReaderState *record,
diff --git a/src/include/catalog/objectaccess.h b/src/include/catalog/objectaccess.h
index ac6adcb7300..567ab63e855 100644
--- a/src/include/catalog/objectaccess.h
+++ b/src/include/catalog/objectaccess.h
@@ -129,10 +129,10 @@ typedef void (*object_access_hook_type) (ObjectAccessType access,
void *arg);
typedef void (*object_access_hook_type_str) (ObjectAccessType access,
- Oid classId,
- const char *objectStr,
- int subId,
- void *arg);
+ Oid classId,
+ const char *objectStr,
+ int subId,
+ void *arg);
/* Plugin sets this variable to a suitable hook function. */
extern PGDLLIMPORT object_access_hook_type object_access_hook;
@@ -152,12 +152,12 @@ extern void RunFunctionExecuteHook(Oid objectId);
/* String versions */
extern void RunObjectPostCreateHookStr(Oid classId, const char *objectStr, int subId,
- bool is_internal);
+ bool is_internal);
extern void RunObjectDropHookStr(Oid classId, const char *objectStr, int subId,
- int dropflags);
+ int dropflags);
extern void RunObjectTruncateHookStr(const char *objectStr);
extern void RunObjectPostAlterHookStr(Oid classId, const char *objectStr, int subId,
- Oid auxiliaryId, bool is_internal);
+ Oid auxiliaryId, bool is_internal);
extern bool RunNamespaceSearchHookStr(const char *objectStr, bool ereport_on_violation);
extern void RunFunctionExecuteHookStr(const char *objectStr);
diff --git a/src/include/catalog/pg_aggregate.dat b/src/include/catalog/pg_aggregate.dat
index 62156346cf4..86cc6507983 100644
--- a/src/include/catalog/pg_aggregate.dat
+++ b/src/include/catalog/pg_aggregate.dat
@@ -563,7 +563,8 @@
{ aggfnoid => 'range_agg(anyrange)', aggtransfn => 'range_agg_transfn',
aggfinalfn => 'range_agg_finalfn', aggfinalextra => 't',
aggtranstype => 'internal' },
-{ aggfnoid => 'range_agg(anymultirange)', aggtransfn => 'multirange_agg_transfn',
+{ aggfnoid => 'range_agg(anymultirange)',
+ aggtransfn => 'multirange_agg_transfn',
aggfinalfn => 'multirange_agg_finalfn', aggfinalextra => 't',
aggtranstype => 'internal' },
diff --git a/src/include/catalog/pg_class.h b/src/include/catalog/pg_class.h
index 304e8c18d52..e1f4eefa220 100644
--- a/src/include/catalog/pg_class.h
+++ b/src/include/catalog/pg_class.h
@@ -223,7 +223,7 @@ DECLARE_INDEX(pg_class_tblspc_relfilenode_index, 3455, ClassTblspcRelfilenodeInd
(relkind) == RELKIND_TOASTVALUE || \
(relkind) == RELKIND_MATVIEW)
-extern int errdetail_relkind_not_supported(char relkind);
+extern int errdetail_relkind_not_supported(char relkind);
#endif /* EXPOSE_TO_CLIENT_CODE */
diff --git a/src/include/catalog/pg_collation.h b/src/include/catalog/pg_collation.h
index c642c3bb952..2190ccb5b88 100644
--- a/src/include/catalog/pg_collation.h
+++ b/src/include/catalog/pg_collation.h
@@ -41,7 +41,7 @@ CATALOG(pg_collation,3456,CollationRelationId)
int32 collencoding; /* encoding for this collation; -1 = "all" */
#ifdef CATALOG_VARLEN /* variable-length fields start here */
text collcollate BKI_DEFAULT(_null_); /* LC_COLLATE setting */
- text collctype BKI_DEFAULT(_null_); /* LC_CTYPE setting */
+ text collctype BKI_DEFAULT(_null_); /* LC_CTYPE setting */
text colliculocale BKI_DEFAULT(_null_); /* ICU locale ID */
text collversion BKI_DEFAULT(_null_); /* provider-dependent
* version of collation
diff --git a/src/include/catalog/pg_database.dat b/src/include/catalog/pg_database.dat
index 05873f74f68..47dcbfb343b 100644
--- a/src/include/catalog/pg_database.dat
+++ b/src/include/catalog/pg_database.dat
@@ -14,7 +14,8 @@
{ oid => '1', oid_symbol => 'Template1DbOid',
descr => 'default template for new databases',
- datname => 'template1', encoding => 'ENCODING', datlocprovider => 'LOCALE_PROVIDER', datistemplate => 't',
+ datname => 'template1', encoding => 'ENCODING',
+ datlocprovider => 'LOCALE_PROVIDER', datistemplate => 't',
datallowconn => 't', datconnlimit => '-1', datfrozenxid => '0',
datminmxid => '1', dattablespace => 'pg_default', datcollate => 'LC_COLLATE',
datctype => 'LC_CTYPE', daticulocale => 'ICU_LOCALE', datacl => '_null_' },
diff --git a/src/include/catalog/pg_parameter_acl.h b/src/include/catalog/pg_parameter_acl.h
index 263079c9e1d..aa7264a4bbb 100644
--- a/src/include/catalog/pg_parameter_acl.h
+++ b/src/include/catalog/pg_parameter_acl.h
@@ -46,7 +46,7 @@ CATALOG(pg_parameter_acl,8924,ParameterAclRelationId) BKI_SHARED_RELATION
* the format of pg_parameter_acl relation.
* ----------------
*/
-typedef FormData_pg_parameter_acl *Form_pg_parameter_acl;
+typedef FormData_pg_parameter_acl * Form_pg_parameter_acl;
DECLARE_TOAST_WITH_MACRO(pg_parameter_acl, 8925, 8926, PgParameterAclToastTable, PgParameterAclToastIndex);
diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat
index 6d378ff7859..c1bc1c3cce1 100644
--- a/src/include/catalog/pg_proc.dat
+++ b/src/include/catalog/pg_proc.dat
@@ -5383,9 +5383,8 @@
prosrc => 'pg_stat_have_stats' },
{ oid => '8523', descr => 'statistics: information about subscription stats',
- proname => 'pg_stat_get_subscription_stats',
- provolatile => 's', proparallel => 'r',
- prorettype => 'record', proargtypes => 'oid',
+ proname => 'pg_stat_get_subscription_stats', provolatile => 's',
+ proparallel => 'r', prorettype => 'record', proargtypes => 'oid',
proallargtypes => '{oid,oid,int8,int8,timestamptz}',
proargmodes => '{i,o,o,o,o}',
proargnames => '{subid,subid,apply_error_count,sync_error_count,stats_reset}',
@@ -5655,8 +5654,8 @@
proargnames => '{wal_records,wal_fpi,wal_bytes,wal_buffers_full,wal_write,wal_sync,wal_write_time,wal_sync_time,stats_reset}',
prosrc => 'pg_stat_get_wal' },
{ oid => '9085', descr => 'statistics: information about WAL prefetching',
- proname => 'pg_stat_get_recovery_prefetch', prorows => '1', provolatile => 'v',
- proretset => 't', prorettype => 'record', proargtypes => '',
+ proname => 'pg_stat_get_recovery_prefetch', prorows => '1', proretset => 't',
+ provolatile => 'v', prorettype => 'record', proargtypes => '',
proallargtypes => '{timestamptz,int8,int8,int8,int8,int8,int8,int4,int4,int4}',
proargmodes => '{o,o,o,o,o,o,o,o,o,o}',
proargnames => '{stats_reset,prefetch,hit,skip_init,skip_new,skip_fpw,skip_rep,wal_distance,block_distance,io_depth}',
@@ -6298,8 +6297,7 @@
{ oid => '2739', descr => 'finish taking an online backup',
proname => 'pg_backup_stop', provolatile => 'v', proparallel => 'r',
prorettype => 'record', proargtypes => 'bool',
- proallargtypes => '{bool,pg_lsn,text,text}',
- proargmodes => '{i,o,o,o}',
+ proallargtypes => '{bool,pg_lsn,text,text}', proargmodes => '{i,o,o,o}',
proargnames => '{wait_for_archive,lsn,labelfile,spcmapfile}',
prosrc => 'pg_backup_stop' },
{ oid => '3436', descr => 'promote standby server',
@@ -8800,15 +8798,18 @@
prosrc => 'json_object_agg_transfn' },
{ oid => '8175', descr => 'json object aggregate transition function',
proname => 'json_object_agg_strict_transfn', proisstrict => 'f',
- provolatile => 's', prorettype => 'internal', proargtypes => 'internal any any',
+ provolatile => 's', prorettype => 'internal',
+ proargtypes => 'internal any any',
prosrc => 'json_object_agg_strict_transfn' },
{ oid => '8176', descr => 'json object aggregate transition function',
proname => 'json_object_agg_unique_transfn', proisstrict => 'f',
- provolatile => 's', prorettype => 'internal', proargtypes => 'internal any any',
+ provolatile => 's', prorettype => 'internal',
+ proargtypes => 'internal any any',
prosrc => 'json_object_agg_unique_transfn' },
{ oid => '8177', descr => 'json object aggregate transition function',
proname => 'json_object_agg_unique_strict_transfn', proisstrict => 'f',
- provolatile => 's', prorettype => 'internal', proargtypes => 'internal any any',
+ provolatile => 's', prorettype => 'internal',
+ proargtypes => 'internal any any',
prosrc => 'json_object_agg_unique_strict_transfn' },
{ oid => '3196', descr => 'json object aggregate final function',
proname => 'json_object_agg_finalfn', proisstrict => 'f',
@@ -8822,7 +8823,8 @@
proname => 'json_object_agg_strict', prokind => 'a', proisstrict => 'f',
provolatile => 's', prorettype => 'json', proargtypes => 'any any',
prosrc => 'aggregate_dummy' },
-{ oid => '8179', descr => 'aggregate input into a json object with unique keys',
+{ oid => '8179',
+ descr => 'aggregate input into a json object with unique keys',
proname => 'json_object_agg_unique', prokind => 'a', proisstrict => 'f',
provolatile => 's', prorettype => 'json', proargtypes => 'any any',
prosrc => 'aggregate_dummy' },
@@ -10192,15 +10194,15 @@
# SQL-spec window functions
{ oid => '3100', descr => 'row number within partition',
proname => 'row_number', prosupport => 'window_row_number_support',
- prokind => 'w', proisstrict => 'f', prorettype => 'int8',
- proargtypes => '', prosrc => 'window_row_number' },
+ prokind => 'w', proisstrict => 'f', prorettype => 'int8', proargtypes => '',
+ prosrc => 'window_row_number' },
{ oid => '8799', descr => 'planner support for row_number run condition',
proname => 'window_row_number_support', prorettype => 'internal',
proargtypes => 'internal', prosrc => 'window_row_number_support' },
{ oid => '3101', descr => 'integer rank with gaps',
- proname => 'rank', prosupport => 'window_rank_support',
- prokind => 'w', proisstrict => 'f', prorettype => 'int8',
- proargtypes => '', prosrc => 'window_rank' },
+ proname => 'rank', prosupport => 'window_rank_support', prokind => 'w',
+ proisstrict => 'f', prorettype => 'int8', proargtypes => '',
+ prosrc => 'window_rank' },
{ oid => '8800', descr => 'planner support for rank run condition',
proname => 'window_rank_support', prorettype => 'internal',
proargtypes => 'internal', prosrc => 'window_rank_support' },
@@ -10737,8 +10739,9 @@
prorettype => 'anymultirange', proargtypes => 'anyrange',
prosrc => 'aggregate_dummy' },
{ oid => '8205', descr => 'aggregate transition function',
- proname => 'multirange_agg_transfn', proisstrict => 'f', prorettype => 'internal',
- proargtypes => 'internal anymultirange', prosrc => 'multirange_agg_transfn' },
+ proname => 'multirange_agg_transfn', proisstrict => 'f',
+ prorettype => 'internal', proargtypes => 'internal anymultirange',
+ prosrc => 'multirange_agg_transfn' },
{ oid => '8206', descr => 'aggregate final function',
proname => 'multirange_agg_finalfn', proisstrict => 'f',
prorettype => 'anymultirange', proargtypes => 'internal anymultirange',
@@ -10756,8 +10759,8 @@
prorettype => 'anymultirange', proargtypes => 'anymultirange',
prosrc => 'aggregate_dummy' },
{ oid => '1293', descr => 'expand multirange to set of ranges',
- proname => 'unnest', prorows => '100',
- proretset => 't', prorettype => 'anyrange', proargtypes => 'anymultirange',
+ proname => 'unnest', prorows => '100', proretset => 't',
+ prorettype => 'anyrange', proargtypes => 'anymultirange',
prosrc => 'multirange_unnest' },
# date, time, timestamp constructors
diff --git a/src/include/catalog/pg_publication.h b/src/include/catalog/pg_publication.h
index 29b18566657..48205ba4293 100644
--- a/src/include/catalog/pg_publication.h
+++ b/src/include/catalog/pg_publication.h
@@ -87,8 +87,8 @@ typedef struct PublicationDesc
bool rf_valid_for_delete;
/*
- * true if the columns are part of the replica identity or the publication actions
- * do not include UPDATE or DELETE.
+ * true if the columns are part of the replica identity or the publication
+ * actions do not include UPDATE or DELETE.
*/
bool cols_valid_for_update;
bool cols_valid_for_delete;
diff --git a/src/include/catalog/pg_statistic_ext_data.h b/src/include/catalog/pg_statistic_ext_data.h
index b01e6205974..0ea3c41b5b0 100644
--- a/src/include/catalog/pg_statistic_ext_data.h
+++ b/src/include/catalog/pg_statistic_ext_data.h
@@ -50,7 +50,7 @@ CATALOG(pg_statistic_ext_data,3429,StatisticExtDataRelationId)
* the format of pg_statistic_ext_data relation.
* ----------------
*/
-typedef FormData_pg_statistic_ext_data * Form_pg_statistic_ext_data;
+typedef FormData_pg_statistic_ext_data *Form_pg_statistic_ext_data;
DECLARE_TOAST(pg_statistic_ext_data, 3430, 3431);
diff --git a/src/include/catalog/renumber_oids.pl b/src/include/catalog/renumber_oids.pl
index ba8c69c87e9..1e33450b444 100755
--- a/src/include/catalog/renumber_oids.pl
+++ b/src/include/catalog/renumber_oids.pl
@@ -140,7 +140,9 @@ foreach my $input_file (@header_files)
$changed = 1;
}
}
- elsif ($line =~ m/^(DECLARE_TOAST_WITH_MACRO\(\s*\w+,\s*)(\d+)(,\s*)(\d+)(,\s*\w+,\s*\w+)\)/)
+ elsif ($line =~
+ m/^(DECLARE_TOAST_WITH_MACRO\(\s*\w+,\s*)(\d+)(,\s*)(\d+)(,\s*\w+,\s*\w+)\)/
+ )
{
my $oid2 = $2;
my $oid4 = $4;
@@ -148,19 +150,21 @@ foreach my $input_file (@header_files)
{
$oid2 = $maphash{$oid2};
my $repl = $1 . $oid2 . $3 . $oid4 . $5 . ")";
- $line =~ s/^DECLARE_TOAST_WITH_MACRO\(\s*\w+,\s*\d+,\s*\d+,\s*\w+,\s*\w+\)/$repl/;
+ $line =~
+ s/^DECLARE_TOAST_WITH_MACRO\(\s*\w+,\s*\d+,\s*\d+,\s*\w+,\s*\w+\)/$repl/;
$changed = 1;
}
if (exists $maphash{$oid4})
{
$oid4 = $maphash{$oid4};
my $repl = $1 . $oid2 . $3 . $oid4 . $5 . ")";
- $line =~ s/^DECLARE_TOAST_WITH_MACRO\(\s*\w+,\s*\d+,\s*\d+,\s*\w+,\s*\w+\)/$repl/;
+ $line =~
+ s/^DECLARE_TOAST_WITH_MACRO\(\s*\w+,\s*\d+,\s*\d+,\s*\w+,\s*\w+\)/$repl/;
$changed = 1;
}
}
- elsif (
- $line =~ m/^(DECLARE_(UNIQUE_)?INDEX(_PKEY)?\(\s*\w+,\s*)(\d+)(,\s*.+)\)/)
+ elsif ($line =~
+ m/^(DECLARE_(UNIQUE_)?INDEX(_PKEY)?\(\s*\w+,\s*)(\d+)(,\s*.+)\)/)
{
if (exists $maphash{$4})
{
diff --git a/src/include/commands/publicationcmds.h b/src/include/commands/publicationcmds.h
index ae87caf089d..57df3fc1e3d 100644
--- a/src/include/commands/publicationcmds.h
+++ b/src/include/commands/publicationcmds.h
@@ -32,8 +32,8 @@ extern ObjectAddress AlterPublicationOwner(const char *name, Oid newOwnerId);
extern void AlterPublicationOwner_oid(Oid pubid, Oid newOwnerId);
extern void InvalidatePublicationRels(List *relids);
extern bool pub_rf_contains_invalid_column(Oid pubid, Relation relation,
- List *ancestors, bool pubviaroot);
+ List *ancestors, bool pubviaroot);
extern bool pub_collist_contains_invalid_column(Oid pubid, Relation relation,
- List *ancestors, bool pubviaroot);
+ List *ancestors, bool pubviaroot);
#endif /* PUBLICATIONCMDS_H */
diff --git a/src/include/executor/execExpr.h b/src/include/executor/execExpr.h
index 9df70e6f06f..e34db8c93cb 100644
--- a/src/include/executor/execExpr.h
+++ b/src/include/executor/execExpr.h
@@ -684,49 +684,49 @@ typedef struct ExprEvalStep
{
int category;
Oid outfuncid;
- } *arg_type_cache; /* cache for datum_to_json[b]() */
+ } *arg_type_cache; /* cache for datum_to_json[b]() */
int nargs;
} json_constructor;
/* for EEOP_IS_JSON */
struct
{
- JsonIsPredicate *pred; /* original expression node */
+ JsonIsPredicate *pred; /* original expression node */
} is_json;
/* for EEOP_JSONEXPR */
struct
{
- JsonExpr *jsexpr; /* original expression node */
+ JsonExpr *jsexpr; /* original expression node */
struct
{
- FmgrInfo func; /* typinput function for output type */
+ FmgrInfo func; /* typinput function for output type */
Oid typioparam;
- } input; /* I/O info for output type */
+ } input; /* I/O info for output type */
NullableDatum
- *formatted_expr, /* formatted context item value */
- *res_expr, /* result item */
- *coercion_expr, /* input for JSON item coercion */
- *pathspec; /* path specification value */
+ *formatted_expr, /* formatted context item value */
+ *res_expr, /* result item */
+ *coercion_expr, /* input for JSON item coercion */
+ *pathspec; /* path specification value */
- ExprState *result_expr; /* coerced to output type */
+ ExprState *result_expr; /* coerced to output type */
ExprState *default_on_empty; /* ON EMPTY DEFAULT expression */
ExprState *default_on_error; /* ON ERROR DEFAULT expression */
- List *args; /* passing arguments */
+ List *args; /* passing arguments */
- void *cache; /* cache for json_populate_type() */
+ void *cache; /* cache for json_populate_type() */
struct JsonCoercionsState
{
struct JsonCoercionState
{
- JsonCoercion *coercion; /* coercion expression */
- ExprState *estate; /* coercion expression state */
- } null,
+ JsonCoercion *coercion; /* coercion expression */
+ ExprState *estate; /* coercion expression state */
+ } null,
string,
- numeric,
+ numeric,
boolean,
date,
time,
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index 873772f1883..d68a6b9d28c 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -266,7 +266,7 @@ ExecProcNode(PlanState *node)
extern ExprState *ExecInitExpr(Expr *node, PlanState *parent);
extern ExprState *ExecInitExprWithParams(Expr *node, ParamListInfo ext_params);
extern ExprState *ExecInitExprWithCaseValue(Expr *node, PlanState *parent,
- Datum *caseval, bool *casenull);
+ Datum *caseval, bool *casenull);
extern ExprState *ExecInitQual(List *qual, PlanState *parent);
extern ExprState *ExecInitCheck(List *qual, PlanState *parent);
extern List *ExecInitExprList(List *nodes, PlanState *parent);
diff --git a/src/include/fmgr.h b/src/include/fmgr.h
index d55abc5414d..5314b737052 100644
--- a/src/include/fmgr.h
+++ b/src/include/fmgr.h
@@ -473,7 +473,7 @@ typedef struct
FMGR_ABI_EXTRA, \
}
-StaticAssertDecl(sizeof(FMGR_ABI_EXTRA) <= sizeof(((Pg_magic_struct*)0)->abi_extra),
+StaticAssertDecl(sizeof(FMGR_ABI_EXTRA) <= sizeof(((Pg_magic_struct *) 0)->abi_extra),
"FMGR_ABI_EXTRA too long");
/*
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 94b191f8ae0..57288013795 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -2158,8 +2158,8 @@ typedef struct MemoizeState
* by bit, false when using hash equality ops */
MemoizeInstrumentation stats; /* execution statistics */
SharedMemoizeInfo *shared_info; /* statistics for parallel workers */
- Bitmapset *keyparamids; /* Param->paramids of expressions belonging to
- * param_exprs */
+ Bitmapset *keyparamids; /* Param->paramids of expressions belonging to
+ * param_exprs */
} MemoizeState;
/* ----------------
diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h
index 340d28f4e1a..b3b407579b0 100644
--- a/src/include/nodes/nodes.h
+++ b/src/include/nodes/nodes.h
@@ -705,7 +705,8 @@ extern bool equal(const void *a, const void *b);
*/
typedef double Selectivity; /* fraction of tuples a qualifier will pass */
typedef double Cost; /* execution cost (in page-access units) */
-typedef double Cardinality; /* (estimated) number of rows or other integer count */
+typedef double Cardinality; /* (estimated) number of rows or other integer
+ * count */
/*
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index 9a716f3794f..73f635b4553 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -301,6 +301,7 @@ typedef struct A_Expr
typedef struct A_Const
{
NodeTag type;
+
/*
* Value nodes are inline for performance. You can treat 'val' as a node,
* as in IsA(&val, Integer). 'val' is not valid if isnull is true.
@@ -763,7 +764,8 @@ typedef struct DefElem
NodeTag type;
char *defnamespace; /* NULL if unqualified name */
char *defname;
- Node *arg; /* typically Integer, Float, String, or TypeName */
+ Node *arg; /* typically Integer, Float, String, or
+ * TypeName */
DefElemAction defaction; /* unspecified action, or SET/ADD/DROP */
int location; /* token location, or -1 if unknown */
} DefElem;
@@ -1151,7 +1153,7 @@ typedef struct RangeTblEntry
* Fields valid for ENR RTEs (else NULL/zero):
*/
char *enrname; /* name of ephemeral named relation */
- Cardinality enrtuples; /* estimated or actual from caller */
+ Cardinality enrtuples; /* estimated or actual from caller */
/*
* Fields valid in all RTEs:
@@ -1667,7 +1669,7 @@ typedef struct JsonFuncExpr
JsonOutput *output; /* output clause, if specified */
JsonBehavior *on_empty; /* ON EMPTY behavior, if specified */
JsonBehavior *on_error; /* ON ERROR behavior, if specified */
- JsonWrapper wrapper; /* array wrapper behavior (JSON_QUERY only) */
+ JsonWrapper wrapper; /* array wrapper behavior (JSON_QUERY only) */
bool omit_quotes; /* omit or keep quotes? (JSON_QUERY only) */
int location; /* token location, or -1 if unknown */
} JsonFuncExpr;
@@ -1680,17 +1682,17 @@ typedef struct JsonTableColumn
{
NodeTag type;
JsonTableColumnType coltype; /* column type */
- char *name; /* column name */
- TypeName *typeName; /* column type name */
- char *pathspec; /* path specification, if any */
- char *pathname; /* path name, if any */
- JsonFormat *format; /* JSON format clause, if specified */
- JsonWrapper wrapper; /* WRAPPER behavior for formatted columns */
- bool omit_quotes; /* omit or keep quotes on scalar strings? */
- List *columns; /* nested columns */
- JsonBehavior *on_empty; /* ON EMPTY behavior */
- JsonBehavior *on_error; /* ON ERROR behavior */
- int location; /* token location, or -1 if unknown */
+ char *name; /* column name */
+ TypeName *typeName; /* column type name */
+ char *pathspec; /* path specification, if any */
+ char *pathname; /* path name, if any */
+ JsonFormat *format; /* JSON format clause, if specified */
+ JsonWrapper wrapper; /* WRAPPER behavior for formatted columns */
+ bool omit_quotes; /* omit or keep quotes on scalar strings? */
+ List *columns; /* nested columns */
+ JsonBehavior *on_empty; /* ON EMPTY behavior */
+ JsonBehavior *on_error; /* ON ERROR behavior */
+ int location; /* token location, or -1 if unknown */
} JsonTableColumn;
/*
@@ -1725,12 +1727,12 @@ typedef struct JsonTablePlan JsonTablePlan;
struct JsonTablePlan
{
NodeTag type;
- JsonTablePlanType plan_type; /* plan type */
+ JsonTablePlanType plan_type; /* plan type */
JsonTablePlanJoinType join_type; /* join type (for joined plan only) */
- JsonTablePlan *plan1; /* first joined plan */
- JsonTablePlan *plan2; /* second joined plan */
- char *pathname; /* path name (for simple plan only) */
- int location; /* token location, or -1 if unknown */
+ JsonTablePlan *plan1; /* first joined plan */
+ JsonTablePlan *plan2; /* second joined plan */
+ char *pathname; /* path name (for simple plan only) */
+ int location; /* token location, or -1 if unknown */
};
/*
@@ -1740,13 +1742,13 @@ struct JsonTablePlan
typedef struct JsonTable
{
NodeTag type;
- JsonCommon *common; /* common JSON path syntax fields */
- List *columns; /* list of JsonTableColumn */
- JsonTablePlan *plan; /* join plan, if specified */
- JsonBehavior *on_error; /* ON ERROR behavior, if specified */
- Alias *alias; /* table alias in FROM clause */
- bool lateral; /* does it have LATERAL prefix? */
- int location; /* token location, or -1 if unknown */
+ JsonCommon *common; /* common JSON path syntax fields */
+ List *columns; /* list of JsonTableColumn */
+ JsonTablePlan *plan; /* join plan, if specified */
+ JsonBehavior *on_error; /* ON ERROR behavior, if specified */
+ Alias *alias; /* table alias in FROM clause */
+ bool lateral; /* does it have LATERAL prefix? */
+ int location; /* token location, or -1 if unknown */
} JsonTable;
/*
@@ -1807,7 +1809,7 @@ typedef struct JsonObjectConstructor
NodeTag type;
List *exprs; /* list of JsonKeyValue pairs */
JsonOutput *output; /* RETURNING clause, if specified */
- bool absent_on_null; /* skip NULL values? */
+ bool absent_on_null; /* skip NULL values? */
bool unique; /* check key uniqueness? */
int location; /* token location, or -1 if unknown */
} JsonObjectConstructor;
@@ -1821,7 +1823,7 @@ typedef struct JsonArrayConstructor
NodeTag type;
List *exprs; /* list of JsonValueExpr elements */
JsonOutput *output; /* RETURNING clause, if specified */
- bool absent_on_null; /* skip NULL elements? */
+ bool absent_on_null; /* skip NULL elements? */
int location; /* token location, or -1 if unknown */
} JsonArrayConstructor;
@@ -1835,7 +1837,7 @@ typedef struct JsonArrayQueryConstructor
Node *query; /* subquery */
JsonOutput *output; /* RETURNING clause, if specified */
JsonFormat *format; /* FORMAT clause for subquery, if specified */
- bool absent_on_null; /* skip NULL elements? */
+ bool absent_on_null; /* skip NULL elements? */
int location; /* token location, or -1 if unknown */
} JsonArrayQueryConstructor;
@@ -1861,9 +1863,9 @@ typedef struct JsonAggConstructor
typedef struct JsonObjectAgg
{
NodeTag type;
- JsonAggConstructor *constructor; /* common fields */
+ JsonAggConstructor *constructor; /* common fields */
JsonKeyValue *arg; /* object key-value pair */
- bool absent_on_null; /* skip NULL values? */
+ bool absent_on_null; /* skip NULL values? */
bool unique; /* check key uniqueness? */
} JsonObjectAgg;
@@ -1874,9 +1876,9 @@ typedef struct JsonObjectAgg
typedef struct JsonArrayAgg
{
NodeTag type;
- JsonAggConstructor *constructor; /* common fields */
+ JsonAggConstructor *constructor; /* common fields */
JsonValueExpr *arg; /* array element expression */
- bool absent_on_null; /* skip NULL elements? */
+ bool absent_on_null; /* skip NULL elements? */
} JsonArrayAgg;
@@ -2621,7 +2623,7 @@ typedef struct Constraint
char generated_when; /* ALWAYS or BY DEFAULT */
/* Fields used for unique constraints (UNIQUE and PRIMARY KEY): */
- bool nulls_not_distinct; /* null treatment for UNIQUE constraints */
+ bool nulls_not_distinct; /* null treatment for UNIQUE constraints */
List *keys; /* String nodes naming referenced key
* column(s) */
List *including; /* String nodes naming referenced nonkey
@@ -3250,7 +3252,7 @@ typedef struct IndexStmt
SubTransactionId oldFirstRelfilenodeSubid; /* rd_firstRelfilenodeSubid of
* oldNode */
bool unique; /* is index unique? */
- bool nulls_not_distinct; /* null treatment for UNIQUE constraints */
+ bool nulls_not_distinct; /* null treatment for UNIQUE constraints */
bool primary; /* is index a primary key? */
bool isconstraint; /* is it for a pkey/unique constraint? */
bool deferrable; /* is the constraint DEFERRABLE? */
diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h
index 244d1e11974..a6e5db4eecc 100644
--- a/src/include/nodes/pathnodes.h
+++ b/src/include/nodes/pathnodes.h
@@ -335,11 +335,11 @@ struct PlannerInfo
MemoryContext planner_cxt; /* context holding PlannerInfo */
- Cardinality total_table_pages; /* # of pages in all non-dummy tables of
+ Cardinality total_table_pages; /* # of pages in all non-dummy tables of
* query */
- Selectivity tuple_fraction; /* tuple_fraction passed to query_planner */
- Cardinality limit_tuples; /* limit_tuples passed to query_planner */
+ Selectivity tuple_fraction; /* tuple_fraction passed to query_planner */
+ Cardinality limit_tuples; /* limit_tuples passed to query_planner */
Index qual_security_level; /* minimum security_level for quals */
/* Note: qual_security_level is zero if there are no securityQuals */
@@ -682,7 +682,7 @@ typedef struct RelOptInfo
Relids relids; /* set of base relids (rangetable indexes) */
/* size estimates generated by planner */
- Cardinality rows; /* estimated number of result tuples */
+ Cardinality rows; /* estimated number of result tuples */
/* per-relation planner control flags */
bool consider_startup; /* keep cheap-startup-cost paths? */
@@ -719,7 +719,7 @@ typedef struct RelOptInfo
List *indexlist; /* list of IndexOptInfo */
List *statlist; /* list of StatisticExtInfo */
BlockNumber pages; /* size estimates derived from pg_class */
- Cardinality tuples;
+ Cardinality tuples;
double allvisfrac;
Bitmapset *eclass_indexes; /* Indexes in PlannerInfo's eq_classes list of
* ECs that mention this rel */
@@ -842,7 +842,7 @@ struct IndexOptInfo
/* index-size statistics (from pg_class and elsewhere) */
BlockNumber pages; /* number of disk pages in index */
- Cardinality tuples; /* number of index tuples in index */
+ Cardinality tuples; /* number of index tuples in index */
int tree_height; /* index tree height, or -1 if unknown */
/* index descriptor information */
@@ -1151,7 +1151,7 @@ typedef struct ParamPathInfo
NodeTag type;
Relids ppi_req_outer; /* rels supplying parameters used by path */
- Cardinality ppi_rows; /* estimated number of result tuples */
+ Cardinality ppi_rows; /* estimated number of result tuples */
List *ppi_clauses; /* join clauses available from outer rels */
} ParamPathInfo;
@@ -1201,7 +1201,7 @@ typedef struct Path
int parallel_workers; /* desired # of workers; 0 = not parallel */
/* estimated size/costs for path (see costsize.c for more info) */
- Cardinality rows; /* estimated number of result tuples */
+ Cardinality rows; /* estimated number of result tuples */
Cost startup_cost; /* cost expended before fetching any tuples */
Cost total_cost; /* total cost (assuming all tuples fetched) */
@@ -1464,7 +1464,7 @@ typedef struct AppendPath
List *subpaths; /* list of component Paths */
/* Index of first partial path in subpaths; list_length(subpaths) if none */
int first_partial_path;
- Cardinality limit_tuples; /* hard limit on output tuples, or -1 */
+ Cardinality limit_tuples; /* hard limit on output tuples, or -1 */
} AppendPath;
#define IS_DUMMY_APPEND(p) \
@@ -1486,7 +1486,7 @@ typedef struct MergeAppendPath
{
Path path;
List *subpaths; /* list of component Paths */
- Cardinality limit_tuples; /* hard limit on output tuples, or -1 */
+ Cardinality limit_tuples; /* hard limit on output tuples, or -1 */
} MergeAppendPath;
/*
@@ -1529,7 +1529,7 @@ typedef struct MemoizePath
* complete after caching the first record. */
bool binary_mode; /* true when cache key should be compared bit
* by bit, false when using hash equality ops */
- Cardinality calls; /* expected number of rescans */
+ Cardinality calls; /* expected number of rescans */
uint32 est_entries; /* The maximum number of entries that the
* planner expects will fit in the cache, or 0
* if unknown */
@@ -1681,7 +1681,7 @@ typedef struct HashPath
JoinPath jpath;
List *path_hashclauses; /* join clauses used for hashing */
int num_batches; /* number of batches expected */
- Cardinality inner_rows_total; /* total inner rows expected */
+ Cardinality inner_rows_total; /* total inner rows expected */
} HashPath;
/*
@@ -1784,7 +1784,7 @@ typedef struct AggPath
Path *subpath; /* path representing input source */
AggStrategy aggstrategy; /* basic strategy, see nodes.h */
AggSplit aggsplit; /* agg-splitting mode, see nodes.h */
- Cardinality numGroups; /* estimated number of groups in input */
+ Cardinality numGroups; /* estimated number of groups in input */
uint64 transitionSpace; /* for pass-by-ref transition data */
List *groupClause; /* a list of SortGroupClause's */
List *qual; /* quals (HAVING quals), if any */
@@ -1798,7 +1798,7 @@ typedef struct GroupingSetData
{
NodeTag type;
List *set; /* grouping set as list of sortgrouprefs */
- Cardinality numGroups; /* est. number of result groups */
+ Cardinality numGroups; /* est. number of result groups */
} GroupingSetData;
typedef struct RollupData
@@ -1807,7 +1807,7 @@ typedef struct RollupData
List *groupClause; /* applicable subset of parse->groupClause */
List *gsets; /* lists of integer indexes into groupClause */
List *gsets_data; /* list of GroupingSetData */
- Cardinality numGroups; /* est. number of result groups */
+ Cardinality numGroups; /* est. number of result groups */
bool hashable; /* can be hashed */
bool is_hashed; /* to be implemented as a hashagg */
} RollupData;
@@ -1861,7 +1861,7 @@ typedef struct SetOpPath
List *distinctList; /* SortGroupClauses identifying target cols */
AttrNumber flagColIdx; /* where is the flag column, if any */
int firstFlag; /* flag value for first input relation */
- Cardinality numGroups; /* estimated number of groups in input */
+ Cardinality numGroups; /* estimated number of groups in input */
} SetOpPath;
/*
@@ -1874,7 +1874,7 @@ typedef struct RecursiveUnionPath
Path *rightpath;
List *distinctList; /* SortGroupClauses identifying target cols */
int wtParam; /* ID of Param representing work table */
- Cardinality numGroups; /* estimated number of groups in input */
+ Cardinality numGroups; /* estimated number of groups in input */
} RecursiveUnionPath;
/*
@@ -2632,7 +2632,7 @@ typedef struct
typedef struct
{
bool limit_needed;
- Cardinality limit_tuples;
+ Cardinality limit_tuples;
int64 count_est;
int64 offset_est;
} FinalPathExtraData;
@@ -2663,15 +2663,15 @@ typedef struct JoinCostWorkspace
Cost inner_rescan_run_cost;
/* private for cost_mergejoin code */
- Cardinality outer_rows;
- Cardinality inner_rows;
- Cardinality outer_skip_rows;
- Cardinality inner_skip_rows;
+ Cardinality outer_rows;
+ Cardinality inner_rows;
+ Cardinality outer_skip_rows;
+ Cardinality inner_skip_rows;
/* private for cost_hashjoin code */
int numbuckets;
int numbatches;
- Cardinality inner_rows_total;
+ Cardinality inner_rows_total;
} JoinCostWorkspace;
/*
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index e43e360d9be..e319e83bd82 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -121,7 +121,7 @@ typedef struct Plan
/*
* planner's estimate of result size of this plan step
*/
- Cardinality plan_rows; /* number of rows plan is expected to emit */
+ Cardinality plan_rows; /* number of rows plan is expected to emit */
int plan_width; /* average row width in bytes */
/*
@@ -834,7 +834,7 @@ typedef struct Memoize
uint32 est_entries; /* The maximum number of entries that the
* planner expects will fit in the cache, or 0
* if unknown */
- Bitmapset *keyparamids; /* paramids from param_exprs */
+ Bitmapset *keyparamids; /* paramids from param_exprs */
} Memoize;
/* ----------------
@@ -1013,7 +1013,7 @@ typedef struct Hash
AttrNumber skewColumn; /* outer join key's column #, or zero */
bool skewInherit; /* is outer join rel an inheritance tree? */
/* all other info is in the parent HashJoin node */
- Cardinality rows_total; /* estimate total rows if parallel_aware */
+ Cardinality rows_total; /* estimate total rows if parallel_aware */
} Hash;
/* ----------------
diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h
index 66d32fc0062..66e179c4356 100644
--- a/src/include/nodes/primnodes.h
+++ b/src/include/nodes/primnodes.h
@@ -1251,7 +1251,7 @@ typedef enum JsonExprOp
JSON_VALUE_OP, /* JSON_VALUE() */
JSON_QUERY_OP, /* JSON_QUERY() */
JSON_EXISTS_OP, /* JSON_EXISTS() */
- JSON_TABLE_OP /* JSON_TABLE() */
+ JSON_TABLE_OP /* JSON_TABLE() */
} JsonExprOp;
/*
@@ -1274,7 +1274,8 @@ typedef enum JsonFormatType
{
JS_FORMAT_DEFAULT, /* unspecified */
JS_FORMAT_JSON, /* FORMAT JSON [ENCODING ...] */
- JS_FORMAT_JSONB /* implicit internal format for RETURNING jsonb */
+ JS_FORMAT_JSONB /* implicit internal format for RETURNING
+ * jsonb */
} JsonFormatType;
/*
@@ -1315,7 +1316,7 @@ typedef enum JsonWrapper
typedef struct JsonFormat
{
NodeTag type;
- JsonFormatType format_type; /* format type */
+ JsonFormatType format_type; /* format type */
JsonEncoding encoding; /* JSON encoding */
int location; /* token location, or -1 if unknown */
} JsonFormat;
@@ -1340,7 +1341,7 @@ typedef struct JsonValueExpr
{
NodeTag type;
Expr *raw_expr; /* raw expression */
- Expr *formatted_expr; /* formatted expression or NULL */
+ Expr *formatted_expr; /* formatted expression or NULL */
JsonFormat *format; /* FORMAT clause, if specified */
} JsonValueExpr;
@@ -1367,7 +1368,7 @@ typedef struct JsonConstructorExpr
Expr *func; /* underlying json[b]_xxx() function call */
Expr *coercion; /* coercion to RETURNING type */
JsonReturning *returning; /* RETURNING clause */
- bool absent_on_null; /* ABSENT ON NULL? */
+ bool absent_on_null; /* ABSENT ON NULL? */
bool unique; /* WITH UNIQUE KEYS? (JSON_OBJECT[AGG] only) */
int location;
} JsonConstructorExpr;
@@ -1380,7 +1381,7 @@ typedef enum JsonValueType
{
JS_TYPE_ANY, /* IS JSON [VALUE] */
JS_TYPE_OBJECT, /* IS JSON OBJECT */
- JS_TYPE_ARRAY, /* IS JSON ARRAY*/
+ JS_TYPE_ARRAY, /* IS JSON ARRAY */
JS_TYPE_SCALAR /* IS JSON SCALAR */
} JsonValueType;
@@ -1450,17 +1451,17 @@ typedef struct JsonExpr
{
Expr xpr;
JsonExprOp op; /* json function ID */
- Node *formatted_expr; /* formatted context item expression */
+ Node *formatted_expr; /* formatted context item expression */
JsonCoercion *result_coercion; /* resulting coercion to RETURNING type */
JsonFormat *format; /* context item format (JSON/JSONB) */
Node *path_spec; /* JSON path specification expression */
List *passing_names; /* PASSING argument names */
- List *passing_values; /* PASSING argument values */
+ List *passing_values; /* PASSING argument values */
JsonReturning *returning; /* RETURNING clause type/format info */
JsonBehavior *on_empty; /* ON EMPTY behavior */
JsonBehavior *on_error; /* ON ERROR behavior */
- JsonItemCoercions *coercions; /* coercions for JSON_VALUE */
- JsonWrapper wrapper; /* WRAPPER for JSON_QUERY */
+ JsonItemCoercions *coercions; /* coercions for JSON_VALUE */
+ JsonWrapper wrapper; /* WRAPPER for JSON_QUERY */
bool omit_quotes; /* KEEP/OMIT QUOTES for JSON_QUERY */
int location; /* token location, or -1 if unknown */
} JsonExpr;
@@ -1472,13 +1473,15 @@ typedef struct JsonExpr
typedef struct JsonTableParent
{
NodeTag type;
- Const *path; /* jsonpath constant */
- char *name; /* path name */
- Node *child; /* nested columns, if any */
- bool outerJoin; /* outer or inner join for nested columns? */
- int colMin; /* min column index in the resulting column list */
- int colMax; /* max column index in the resulting column list */
- bool errorOnError; /* ERROR/EMPTY ON ERROR behavior */
+ Const *path; /* jsonpath constant */
+ char *name; /* path name */
+ Node *child; /* nested columns, if any */
+ bool outerJoin; /* outer or inner join for nested columns? */
+ int colMin; /* min column index in the resulting column
+ * list */
+ int colMax; /* max column index in the resulting column
+ * list */
+ bool errorOnError; /* ERROR/EMPTY ON ERROR behavior */
} JsonTableParent;
/*
@@ -1488,9 +1491,9 @@ typedef struct JsonTableParent
typedef struct JsonTableSibling
{
NodeTag type;
- Node *larg; /* left join node */
- Node *rarg; /* right join node */
- bool cross; /* cross or union join? */
+ Node *larg; /* left join node */
+ Node *rarg; /* right join node */
+ bool cross; /* cross or union join? */
} JsonTableSibling;
/* ----------------
diff --git a/src/include/optimizer/paths.h b/src/include/optimizer/paths.h
index 3d95e6bfc88..b6e137cf83f 100644
--- a/src/include/optimizer/paths.h
+++ b/src/include/optimizer/paths.h
@@ -203,9 +203,9 @@ typedef enum
extern PathKeysComparison compare_pathkeys(List *keys1, List *keys2);
extern bool pathkeys_contained_in(List *keys1, List *keys2);
extern bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common);
-extern int group_keys_reorder_by_pathkeys(List *pathkeys,
- List **group_pathkeys,
- List **group_clauses);
+extern int group_keys_reorder_by_pathkeys(List *pathkeys,
+ List **group_pathkeys,
+ List **group_clauses);
extern List *get_useful_group_keys_orderings(PlannerInfo *root, double nrows,
List *path_pathkeys,
List *pathkeys, List *clauses);
diff --git a/src/include/parser/analyze.h b/src/include/parser/analyze.h
index c69920d1087..dc379547c70 100644
--- a/src/include/parser/analyze.h
+++ b/src/include/parser/analyze.h
@@ -26,7 +26,7 @@ extern PGDLLIMPORT post_parse_analyze_hook_type post_parse_analyze_hook;
extern Query *parse_analyze_fixedparams(RawStmt *parseTree, const char *sourceText,
- const Oid *paramTypes, int numParams, QueryEnvironment *queryEnv);
+ const Oid *paramTypes, int numParams, QueryEnvironment *queryEnv);
extern Query *parse_analyze_varparams(RawStmt *parseTree, const char *sourceText,
Oid **paramTypes, int *numParams, QueryEnvironment *queryEnv);
extern Query *parse_analyze_withcb(RawStmt *parseTree, const char *sourceText,
diff --git a/src/include/parser/parse_param.h b/src/include/parser/parse_param.h
index d6f0b656495..df1ee660d83 100644
--- a/src/include/parser/parse_param.h
+++ b/src/include/parser/parse_param.h
@@ -16,9 +16,9 @@
#include "parser/parse_node.h"
extern void setup_parse_fixed_parameters(ParseState *pstate,
- const Oid *paramTypes, int numParams);
+ const Oid *paramTypes, int numParams);
extern void setup_parse_variable_parameters(ParseState *pstate,
- Oid **paramTypes, int *numParams);
+ Oid **paramTypes, int *numParams);
extern void check_variable_parameters(ParseState *pstate, Query *query);
extern bool query_contains_extern_params(Query *query);
diff --git a/src/include/port.h b/src/include/port.h
index 3d103a2b31b..12c05b5d9f9 100644
--- a/src/include/port.h
+++ b/src/include/port.h
@@ -140,7 +140,7 @@ extern char *pipe_read_line(char *cmd, char *line, int maxsize);
#ifdef EXEC_BACKEND
/* Disable ASLR before exec, for developer builds only (in exec.c) */
-extern int pg_disable_aslr(void);
+extern int pg_disable_aslr(void);
#endif
diff --git a/src/include/postmaster/pgarch.h b/src/include/postmaster/pgarch.h
index 38cb1c64774..f366a159a8e 100644
--- a/src/include/postmaster/pgarch.h
+++ b/src/include/postmaster/pgarch.h
@@ -55,19 +55,19 @@ typedef struct ArchiveModuleCallbacks
ArchiveCheckConfiguredCB check_configured_cb;
ArchiveFileCB archive_file_cb;
ArchiveShutdownCB shutdown_cb;
-} ArchiveModuleCallbacks;
+} ArchiveModuleCallbacks;
/*
* Type of the shared library symbol _PG_archive_module_init that is looked
* up when loading an archive library.
*/
-typedef void (*ArchiveModuleInit) (ArchiveModuleCallbacks * cb);
+typedef void (*ArchiveModuleInit) (ArchiveModuleCallbacks *cb);
/*
* Since the logic for archiving via a shell command is in the core server
* and does not need to be loaded via a shared library, it has a special
* initialization function.
*/
-extern void shell_archive_init(ArchiveModuleCallbacks * cb);
+extern void shell_archive_init(ArchiveModuleCallbacks *cb);
#endif /* _PGARCH_H */
diff --git a/src/include/replication/basebackup_target.h b/src/include/replication/basebackup_target.h
index e23ac29a89d..1cf3c0777dc 100644
--- a/src/include/replication/basebackup_target.h
+++ b/src/include/replication/basebackup_target.h
@@ -42,7 +42,7 @@ typedef struct BaseBackupTargetHandle BaseBackupTargetHandle;
*/
extern void BaseBackupAddTarget(char *name,
void *(*check_detail) (char *, char *),
- bbsink * (*get_sink) (bbsink *, void *));
+ bbsink *(*get_sink) (bbsink *, void *));
/*
* These functions are used by the core code to access base backup targets
diff --git a/src/include/replication/decode.h b/src/include/replication/decode.h
index a33c2a718a7..741bf65cf7a 100644
--- a/src/include/replication/decode.h
+++ b/src/include/replication/decode.h
@@ -28,7 +28,7 @@ extern void xact_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
extern void standby_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
extern void logicalmsg_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
-extern void LogicalDecodingProcessRecord(LogicalDecodingContext *ctx,
+extern void LogicalDecodingProcessRecord(LogicalDecodingContext *ctx,
XLogReaderState *record);
#endif
diff --git a/src/include/replication/slot.h b/src/include/replication/slot.h
index 1ee63c4cf44..8c9f3321d50 100644
--- a/src/include/replication/slot.h
+++ b/src/include/replication/slot.h
@@ -216,7 +216,7 @@ extern bool ReplicationSlotsCountDBSlots(Oid dboid, int *nslots, int *nactive);
extern void ReplicationSlotsDropDBSlots(Oid dboid);
extern bool InvalidateObsoleteReplicationSlots(XLogSegNo oldestSegno);
extern ReplicationSlot *SearchNamedReplicationSlot(const char *name, bool need_lock);
-extern int ReplicationSlotIndex(ReplicationSlot *slot);
+extern int ReplicationSlotIndex(ReplicationSlot *slot);
extern void ReplicationSlotNameForTablesync(Oid suboid, Oid relid, char *syncslotname, int szslot);
extern void ReplicationSlotDropAtPubNode(WalReceiverConn *wrconn, char *slotname, bool missing_ok);
diff --git a/src/include/storage/latch.h b/src/include/storage/latch.h
index 0dd79d73fa2..68ab740f161 100644
--- a/src/include/storage/latch.h
+++ b/src/include/storage/latch.h
@@ -181,6 +181,6 @@ extern int WaitLatchOrSocket(Latch *latch, int wakeEvents,
pgsocket sock, long timeout, uint32 wait_event_info);
extern void InitializeLatchWaitSet(void);
extern int GetNumRegisteredWaitEvents(WaitEventSet *set);
-extern bool WaitEventSetCanReportClosed(void);
+extern bool WaitEventSetCanReportClosed(void);
#endif /* LATCH_H */
diff --git a/src/include/tcop/tcopprot.h b/src/include/tcop/tcopprot.h
index 87e408b7199..70d9dab25b8 100644
--- a/src/include/tcop/tcopprot.h
+++ b/src/include/tcop/tcopprot.h
@@ -46,9 +46,9 @@ extern PGDLLIMPORT int log_statement;
extern List *pg_parse_query(const char *query_string);
extern List *pg_rewrite_query(Query *query);
extern List *pg_analyze_and_rewrite_fixedparams(RawStmt *parsetree,
- const char *query_string,
- const Oid *paramTypes, int numParams,
- QueryEnvironment *queryEnv);
+ const char *query_string,
+ const Oid *paramTypes, int numParams,
+ QueryEnvironment *queryEnv);
extern List *pg_analyze_and_rewrite_varparams(RawStmt *parsetree,
const char *query_string,
Oid **paramTypes,
diff --git a/src/include/utils/formatting.h b/src/include/utils/formatting.h
index 0a22af80a21..f048eb0869d 100644
--- a/src/include/utils/formatting.h
+++ b/src/include/utils/formatting.h
@@ -32,6 +32,6 @@ extern char *asc_initcap(const char *buff, size_t nbytes);
extern Datum parse_datetime(text *date_txt, text *fmt, Oid collid, bool strict,
Oid *typid, int32 *typmod, int *tz,
bool *have_error);
-extern int datetime_format_flags(const char *fmt_str, bool *have_error);
+extern int datetime_format_flags(const char *fmt_str, bool *have_error);
#endif
diff --git a/src/include/utils/jsonpath.h b/src/include/utils/jsonpath.h
index 358b9eb6110..8e79b8dc9f0 100644
--- a/src/include/utils/jsonpath.h
+++ b/src/include/utils/jsonpath.h
@@ -263,8 +263,8 @@ typedef struct JsonPathVariableEvalContext
Oid typid;
int32 typmod;
struct ExprContext *econtext;
- struct ExprState *estate;
- MemoryContext mcxt; /* memory context for cached value */
+ struct ExprState *estate;
+ MemoryContext mcxt; /* memory context for cached value */
Datum value;
bool isnull;
bool evaluated;
@@ -274,14 +274,14 @@ typedef struct JsonPathVariableEvalContext
extern void JsonItemFromDatum(Datum val, Oid typid, int32 typmod,
JsonbValue *res);
-extern bool JsonPathExists(Datum jb, JsonPath *path, List *vars, bool *error);
+extern bool JsonPathExists(Datum jb, JsonPath *path, List *vars, bool *error);
extern Datum JsonPathQuery(Datum jb, JsonPath *jp, JsonWrapper wrapper,
bool *empty, bool *error, List *vars);
extern JsonbValue *JsonPathValue(Datum jb, JsonPath *jp, bool *empty,
bool *error, List *vars);
-extern int EvalJsonPathVar(void *vars, char *varName, int varNameLen,
- JsonbValue *val, JsonbValue *baseObject);
+extern int EvalJsonPathVar(void *vars, char *varName, int varNameLen,
+ JsonbValue *val, JsonbValue *baseObject);
extern PGDLLIMPORT const TableFuncRoutine JsonbTableRoutine;
diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h
index eadbd009045..90b3c49bc12 100644
--- a/src/include/utils/rel.h
+++ b/src/include/utils/rel.h
@@ -159,7 +159,7 @@ typedef struct RelationData
Bitmapset *rd_keyattr; /* cols that can be ref'd by foreign keys */
Bitmapset *rd_pkattr; /* cols included in primary key */
Bitmapset *rd_idattr; /* included in replica identity index */
- Bitmapset *rd_hotblockingattr; /* cols blocking HOT update */
+ Bitmapset *rd_hotblockingattr; /* cols blocking HOT update */
PublicationDesc *rd_pubdesc; /* publication descriptor, or NULL */
@@ -246,7 +246,7 @@ typedef struct RelationData
*/
Oid rd_toastoid; /* Real TOAST table's OID, or InvalidOid */
- bool pgstat_enabled; /* should relation stats be counted */
+ bool pgstat_enabled; /* should relation stats be counted */
/* use "struct" here to avoid needing to include pgstat.h: */
struct PgStat_TableStatus *pgstat_info; /* statistics collection area */
} RelationData;
diff --git a/src/include/utils/relmapper.h b/src/include/utils/relmapper.h
index f10353e1390..557f77e35a9 100644
--- a/src/include/utils/relmapper.h
+++ b/src/include/utils/relmapper.h
@@ -38,7 +38,7 @@ typedef struct xl_relmap_update
extern Oid RelationMapOidToFilenode(Oid relationId, bool shared);
extern Oid RelationMapFilenodeToOid(Oid relationId, bool shared);
-extern Oid RelationMapOidToFilenodeForDatabase(char *dbpath, Oid relationId);
+extern Oid RelationMapOidToFilenodeForDatabase(char *dbpath, Oid relationId);
extern void RelationMapCopy(Oid dbid, Oid tsid, char *srcdbpath,
char *dstdbpath);
extern void RelationMapUpdateMap(Oid relationId, Oid fileNode, bool shared,
diff --git a/src/include/utils/selfuncs.h b/src/include/utils/selfuncs.h
index c313a08d541..d485b9bfcd9 100644
--- a/src/include/utils/selfuncs.h
+++ b/src/include/utils/selfuncs.h
@@ -215,9 +215,9 @@ extern double estimate_num_groups(PlannerInfo *root, List *groupExprs,
EstimationInfo *estinfo);
extern double estimate_num_groups_incremental(PlannerInfo *root, List *groupExprs,
- double input_rows, List **pgset,
- EstimationInfo *estinfo,
- List **cache_varinfos, int prevNExprs);
+ double input_rows, List **pgset,
+ EstimationInfo *estinfo,
+ List **cache_varinfos, int prevNExprs);
extern void estimate_hash_bucket_stats(PlannerInfo *root,
Node *hashkey, double nbuckets,
diff --git a/src/include/utils/sortsupport.h b/src/include/utils/sortsupport.h
index 140a9f9ffc4..8c36cf8d82c 100644
--- a/src/include/utils/sortsupport.h
+++ b/src/include/utils/sortsupport.h
@@ -375,11 +375,11 @@ ApplySortAbbrevFullComparator(Datum datum1, bool isNull1,
* Datatypes that install these as their comparator or abbrevated comparator
* are eligible for faster sorting.
*/
-extern int ssup_datum_unsigned_cmp(Datum x, Datum y, SortSupport ssup);
+extern int ssup_datum_unsigned_cmp(Datum x, Datum y, SortSupport ssup);
#if SIZEOF_DATUM >= 8
-extern int ssup_datum_signed_cmp(Datum x, Datum y, SortSupport ssup);
+extern int ssup_datum_signed_cmp(Datum x, Datum y, SortSupport ssup);
#endif
-extern int ssup_datum_int32_cmp(Datum x, Datum y, SortSupport ssup);
+extern int ssup_datum_int32_cmp(Datum x, Datum y, SortSupport ssup);
/* Other functions in utils/sort/sortsupport.c */
extern void PrepareSortSupportComparisonShim(Oid cmpFunc, SortSupport ssup);
diff --git a/src/interfaces/libpq/fe-auth.c b/src/interfaces/libpq/fe-auth.c
index 6fceff561b9..0a072a36dc2 100644
--- a/src/interfaces/libpq/fe-auth.c
+++ b/src/interfaces/libpq/fe-auth.c
@@ -1123,10 +1123,10 @@ pg_fe_getusername(uid_t user_id, PQExpBuffer errorMessage)
/*
* Some users are using configure --enable-thread-safety-force, so we
- * might as well do the locking within our library to protect
- * getpwuid(). In fact, application developers can use getpwuid() in
- * their application if they use the locking call we provide, or install
- * their own locking function using PQregisterThreadLock().
+ * might as well do the locking within our library to protect getpwuid().
+ * In fact, application developers can use getpwuid() in their application
+ * if they use the locking call we provide, or install their own locking
+ * function using PQregisterThreadLock().
*/
pglock_thread();
diff --git a/src/interfaces/libpq/fe-secure-common.c b/src/interfaces/libpq/fe-secure-common.c
index 165a6ed9b7b..8046fcd884a 100644
--- a/src/interfaces/libpq/fe-secure-common.c
+++ b/src/interfaces/libpq/fe-secure-common.c
@@ -203,6 +203,7 @@ pq_verify_peer_name_matches_certificate_ip(PGconn *conn,
match = 1;
}
}
+
/*
* If they don't have inet_pton(), skip this. Then, an IPv6 address in a
* certificate will cause an error.
diff --git a/src/interfaces/libpq/t/002_api.pl b/src/interfaces/libpq/t/002_api.pl
index 8b3355e6dd6..fa00221ae29 100644
--- a/src/interfaces/libpq/t/002_api.pl
+++ b/src/interfaces/libpq/t/002_api.pl
@@ -6,7 +6,7 @@ use PostgreSQL::Test::Utils;
use Test::More;
# Test PQsslAttribute(NULL, "library")
-my ($out, $err) = run_command(['libpq_testclient', '--ssl']);
+my ($out, $err) = run_command([ 'libpq_testclient', '--ssl' ]);
if ($ENV{with_ssl} eq 'openssl')
{
@@ -14,7 +14,9 @@ if ($ENV{with_ssl} eq 'openssl')
}
else
{
- is($err, 'SSL is not enabled', 'PQsslAttribute(NULL, "library") returns NULL');
+ is( $err,
+ 'SSL is not enabled',
+ 'PQsslAttribute(NULL, "library") returns NULL');
}
done_testing();
diff --git a/src/test/icu/t/010_database.pl b/src/test/icu/t/010_database.pl
index 07a1084b09d..7035ff3c209 100644
--- a/src/test/icu/t/010_database.pl
+++ b/src/test/icu/t/010_database.pl
@@ -16,30 +16,34 @@ $node1->init;
$node1->start;
$node1->safe_psql('postgres',
- q{CREATE DATABASE dbicu LOCALE_PROVIDER icu LOCALE 'C' ICU_LOCALE 'en@colCaseFirst=upper' ENCODING 'UTF8' TEMPLATE template0});
+ q{CREATE DATABASE dbicu LOCALE_PROVIDER icu LOCALE 'C' ICU_LOCALE 'en@colCaseFirst=upper' ENCODING 'UTF8' TEMPLATE template0}
+);
-$node1->safe_psql('dbicu',
-q{
+$node1->safe_psql(
+ 'dbicu',
+ q{
CREATE COLLATION upperfirst (provider = icu, locale = 'en@colCaseFirst=upper');
CREATE TABLE icu (def text, en text COLLATE "en-x-icu", upfirst text COLLATE upperfirst);
INSERT INTO icu VALUES ('a', 'a', 'a'), ('b', 'b', 'b'), ('A', 'A', 'A'), ('B', 'B', 'B');
});
-is($node1->safe_psql('dbicu', q{SELECT def FROM icu ORDER BY def}),
+is( $node1->safe_psql('dbicu', q{SELECT def FROM icu ORDER BY def}),
qq(A
a
B
b),
'sort by database default locale');
-is($node1->safe_psql('dbicu', q{SELECT def FROM icu ORDER BY def COLLATE "en-x-icu"}),
+is( $node1->safe_psql(
+ 'dbicu', q{SELECT def FROM icu ORDER BY def COLLATE "en-x-icu"}),
qq(a
A
b
B),
'sort by explicit collation standard');
-is($node1->safe_psql('dbicu', q{SELECT def FROM icu ORDER BY en COLLATE upperfirst}),
+is( $node1->safe_psql(
+ 'dbicu', q{SELECT def FROM icu ORDER BY en COLLATE upperfirst}),
qq(A
a
B
@@ -51,8 +55,12 @@ b),
my ($ret, $stdout, $stderr) = $node1->psql('postgres',
q{CREATE DATABASE dbicu LOCALE_PROVIDER icu TEMPLATE template0});
-isnt($ret, 0, "ICU locale must be specified for ICU provider: exit code not 0");
-like($stderr, qr/ERROR: ICU locale must be specified/, "ICU locale must be specified for ICU provider: error message");
+isnt($ret, 0,
+ "ICU locale must be specified for ICU provider: exit code not 0");
+like(
+ $stderr,
+ qr/ERROR: ICU locale must be specified/,
+ "ICU locale must be specified for ICU provider: error message");
done_testing();
diff --git a/src/test/ldap/t/001_auth.pl b/src/test/ldap/t/001_auth.pl
index b342146e556..86dff8bd1f3 100644
--- a/src/test/ldap/t/001_auth.pl
+++ b/src/test/ldap/t/001_auth.pl
@@ -46,7 +46,8 @@ elsif ($^O eq 'openbsd')
}
else
{
- plan skip_all => "ldap tests not supported on $^O or dependencies not installed";
+ plan skip_all =>
+ "ldap tests not supported on $^O or dependencies not installed";
}
# make your own edits here
diff --git a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl
index cc79d96d473..4cb1170438a 100644
--- a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl
+++ b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl
@@ -28,7 +28,8 @@ for my $testname (@tests)
pipeline_abort transaction disallowed_in_pipeline)) > 0;
# For a bunch of tests, generate a libpq trace file too.
- my $traceout = "$PostgreSQL::Test::Utils::tmp_check/traces/$testname.trace";
+ my $traceout =
+ "$PostgreSQL::Test::Utils::tmp_check/traces/$testname.trace";
if ($cmptrace)
{
push @extraargs, "-t", $traceout;
diff --git a/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl b/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl
index 0429861b16a..5be5ac39eb6 100644
--- a/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl
+++ b/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl
@@ -63,7 +63,8 @@ like(
$node->append_conf('postgresql.conf', "ssl_passphrase.passphrase = 'blurfl'");
# try to start the server again
-my $ret = PostgreSQL::Test::Utils::system_log('pg_ctl', '-D', $node->data_dir, '-l',
+my $ret =
+ PostgreSQL::Test::Utils::system_log('pg_ctl', '-D', $node->data_dir, '-l',
$node->logfile, 'start');
diff --git a/src/test/modules/test_misc/t/002_tablespace.pl b/src/test/modules/test_misc/t/002_tablespace.pl
index 04e54394c12..95cd2b7b65f 100644
--- a/src/test/modules/test_misc/t/002_tablespace.pl
+++ b/src/test/modules/test_misc/t/002_tablespace.pl
@@ -13,9 +13,9 @@ $node->init;
$node->start;
# Create a couple of directories to use as tablespaces.
-my $basedir = $node->basedir();
+my $basedir = $node->basedir();
my $TS1_LOCATION = "$basedir/ts1";
-$TS1_LOCATION =~ s/\/\.\//\//g; # collapse foo/./bar to foo/bar
+$TS1_LOCATION =~ s/\/\.\//\//g; # collapse foo/./bar to foo/bar
mkdir($TS1_LOCATION);
my $TS2_LOCATION = "$basedir/ts2";
$TS2_LOCATION =~ s/\/\.\//\//g;
@@ -34,13 +34,11 @@ $result = $node->psql('postgres',
ok($result != 0, 'clobber tablespace with absolute path');
# Create table in it
-$result = $node->psql('postgres',
- "CREATE TABLE t () TABLESPACE regress_ts1");
+$result = $node->psql('postgres', "CREATE TABLE t () TABLESPACE regress_ts1");
ok($result == 0, 'create table in tablespace with absolute path');
# Can't drop a tablespace that still has a table in it
-$result = $node->psql('postgres',
- "DROP TABLESPACE regress_ts1");
+$result = $node->psql('postgres', "DROP TABLESPACE regress_ts1");
ok($result != 0, 'drop tablespace with absolute path');
# Drop the table
@@ -60,32 +58,28 @@ $result = $node->psql('postgres',
"CREATE TABLESPACE regress_ts2 LOCATION '$TS2_LOCATION'");
ok($result == 0, 'create tablespace 2 with absolute path');
$result = $node->psql('postgres',
- "SET allow_in_place_tablespaces=on; CREATE TABLESPACE regress_ts3 LOCATION ''");
+ "SET allow_in_place_tablespaces=on; CREATE TABLESPACE regress_ts3 LOCATION ''"
+);
ok($result == 0, 'create tablespace 3 with in-place directory');
$result = $node->psql('postgres',
- "SET allow_in_place_tablespaces=on; CREATE TABLESPACE regress_ts4 LOCATION ''");
+ "SET allow_in_place_tablespaces=on; CREATE TABLESPACE regress_ts4 LOCATION ''"
+);
ok($result == 0, 'create tablespace 4 with in-place directory');
# Create a table and test moving between absolute and in-place tablespaces
-$result = $node->psql('postgres',
- "CREATE TABLE t () TABLESPACE regress_ts1");
+$result = $node->psql('postgres', "CREATE TABLE t () TABLESPACE regress_ts1");
ok($result == 0, 'create table in tablespace 1');
-$result = $node->psql('postgres',
- "ALTER TABLE t SET tablespace regress_ts2");
+$result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts2");
ok($result == 0, 'move table abs->abs');
-$result = $node->psql('postgres',
- "ALTER TABLE t SET tablespace regress_ts3");
+$result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts3");
ok($result == 0, 'move table abs->in-place');
-$result = $node->psql('postgres',
- "ALTER TABLE t SET tablespace regress_ts4");
+$result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts4");
ok($result == 0, 'move table in-place->in-place');
-$result = $node->psql('postgres',
- "ALTER TABLE t SET tablespace regress_ts1");
+$result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts1");
ok($result == 0, 'move table in-place->abs');
# Drop everything
-$result = $node->psql('postgres',
- "DROP TABLE t");
+$result = $node->psql('postgres', "DROP TABLE t");
ok($result == 0, 'create table in tablespace 1');
$result = $node->psql('postgres', "DROP TABLESPACE regress_ts1");
ok($result == 0, 'drop tablespace 1');
diff --git a/src/test/modules/test_oat_hooks/test_oat_hooks.c b/src/test/modules/test_oat_hooks/test_oat_hooks.c
index 6f9838f93b5..7ef272cc7ae 100644
--- a/src/test/modules/test_oat_hooks/test_oat_hooks.c
+++ b/src/test/modules/test_oat_hooks/test_oat_hooks.c
@@ -234,9 +234,9 @@ static void
emit_audit_message(const char *type, const char *hook, char *action, char *objName)
{
/*
- * Ensure that audit messages are not duplicated by only emitting them from
- * a leader process, not a worker process. This makes the test results
- * deterministic even if run with force_parallel_mode = regress.
+ * Ensure that audit messages are not duplicated by only emitting them
+ * from a leader process, not a worker process. This makes the test
+ * results deterministic even if run with force_parallel_mode = regress.
*/
if (REGRESS_audit && !IsParallelWorker())
{
@@ -285,7 +285,7 @@ REGRESS_object_access_hook_str(ObjectAccessType access, Oid classId, const char
if (next_object_access_hook_str)
{
- (*next_object_access_hook_str)(access, classId, objName, subId, arg);
+ (*next_object_access_hook_str) (access, classId, objName, subId, arg);
}
switch (access)
@@ -325,7 +325,7 @@ REGRESS_object_access_hook_str(ObjectAccessType access, Oid classId, const char
}
static void
-REGRESS_object_access_hook (ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg)
+REGRESS_object_access_hook(ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg)
{
audit_attempt("object access",
accesstype_to_string(access, 0),
@@ -340,7 +340,7 @@ REGRESS_object_access_hook (ObjectAccessType access, Oid classId, Oid objectId,
/* Forward to next hook in the chain */
if (next_object_access_hook)
- (*next_object_access_hook)(access, classId, objectId, subId, arg);
+ (*next_object_access_hook) (access, classId, objectId, subId, arg);
audit_success("object access",
accesstype_to_string(access, 0),
@@ -381,18 +381,18 @@ REGRESS_exec_check_perms(List *rangeTabls, bool do_abort)
static void
REGRESS_utility_command(PlannedStmt *pstmt,
- const char *queryString,
- bool readOnlyTree,
- ProcessUtilityContext context,
- ParamListInfo params,
- QueryEnvironment *queryEnv,
- DestReceiver *dest,
- QueryCompletion *qc)
+ const char *queryString,
+ bool readOnlyTree,
+ ProcessUtilityContext context,
+ ParamListInfo params,
+ QueryEnvironment *queryEnv,
+ DestReceiver *dest,
+ QueryCompletion *qc)
{
Node *parsetree = pstmt->utilityStmt;
const char *action;
- NodeTag tag = nodeTag(parsetree);
+ NodeTag tag = nodeTag(parsetree);
switch (tag)
{
@@ -441,438 +441,1302 @@ nodetag_to_string(NodeTag tag)
{
switch (tag)
{
- case T_Invalid: return "Invalid"; break;
- case T_IndexInfo: return "IndexInfo"; break;
- case T_ExprContext: return "ExprContext"; break;
- case T_ProjectionInfo: return "ProjectionInfo"; break;
- case T_JunkFilter: return "JunkFilter"; break;
- case T_OnConflictSetState: return "OnConflictSetState"; break;
- case T_ResultRelInfo: return "ResultRelInfo"; break;
- case T_EState: return "EState"; break;
- case T_TupleTableSlot: return "TupleTableSlot"; break;
- case T_Plan: return "Plan"; break;
- case T_Result: return "Result"; break;
- case T_ProjectSet: return "ProjectSet"; break;
- case T_ModifyTable: return "ModifyTable"; break;
- case T_Append: return "Append"; break;
- case T_MergeAppend: return "MergeAppend"; break;
- case T_RecursiveUnion: return "RecursiveUnion"; break;
- case T_BitmapAnd: return "BitmapAnd"; break;
- case T_BitmapOr: return "BitmapOr"; break;
- case T_Scan: return "Scan"; break;
- case T_SeqScan: return "SeqScan"; break;
- case T_SampleScan: return "SampleScan"; break;
- case T_IndexScan: return "IndexScan"; break;
- case T_IndexOnlyScan: return "IndexOnlyScan"; break;
- case T_BitmapIndexScan: return "BitmapIndexScan"; break;
- case T_BitmapHeapScan: return "BitmapHeapScan"; break;
- case T_TidScan: return "TidScan"; break;
- case T_TidRangeScan: return "TidRangeScan"; break;
- case T_SubqueryScan: return "SubqueryScan"; break;
- case T_FunctionScan: return "FunctionScan"; break;
- case T_ValuesScan: return "ValuesScan"; break;
- case T_TableFuncScan: return "TableFuncScan"; break;
- case T_CteScan: return "CteScan"; break;
- case T_NamedTuplestoreScan: return "NamedTuplestoreScan"; break;
- case T_WorkTableScan: return "WorkTableScan"; break;
- case T_ForeignScan: return "ForeignScan"; break;
- case T_CustomScan: return "CustomScan"; break;
- case T_Join: return "Join"; break;
- case T_NestLoop: return "NestLoop"; break;
- case T_MergeJoin: return "MergeJoin"; break;
- case T_HashJoin: return "HashJoin"; break;
- case T_Material: return "Material"; break;
- case T_Memoize: return "Memoize"; break;
- case T_Sort: return "Sort"; break;
- case T_IncrementalSort: return "IncrementalSort"; break;
- case T_Group: return "Group"; break;
- case T_Agg: return "Agg"; break;
- case T_WindowAgg: return "WindowAgg"; break;
- case T_Unique: return "Unique"; break;
- case T_Gather: return "Gather"; break;
- case T_GatherMerge: return "GatherMerge"; break;
- case T_Hash: return "Hash"; break;
- case T_SetOp: return "SetOp"; break;
- case T_LockRows: return "LockRows"; break;
- case T_Limit: return "Limit"; break;
- case T_NestLoopParam: return "NestLoopParam"; break;
- case T_PlanRowMark: return "PlanRowMark"; break;
- case T_PartitionPruneInfo: return "PartitionPruneInfo"; break;
- case T_PartitionedRelPruneInfo: return "PartitionedRelPruneInfo"; break;
- case T_PartitionPruneStepOp: return "PartitionPruneStepOp"; break;
- case T_PartitionPruneStepCombine: return "PartitionPruneStepCombine"; break;
- case T_PlanInvalItem: return "PlanInvalItem"; break;
- case T_PlanState: return "PlanState"; break;
- case T_ResultState: return "ResultState"; break;
- case T_ProjectSetState: return "ProjectSetState"; break;
- case T_ModifyTableState: return "ModifyTableState"; break;
- case T_AppendState: return "AppendState"; break;
- case T_MergeAppendState: return "MergeAppendState"; break;
- case T_RecursiveUnionState: return "RecursiveUnionState"; break;
- case T_BitmapAndState: return "BitmapAndState"; break;
- case T_BitmapOrState: return "BitmapOrState"; break;
- case T_ScanState: return "ScanState"; break;
- case T_SeqScanState: return "SeqScanState"; break;
- case T_SampleScanState: return "SampleScanState"; break;
- case T_IndexScanState: return "IndexScanState"; break;
- case T_IndexOnlyScanState: return "IndexOnlyScanState"; break;
- case T_BitmapIndexScanState: return "BitmapIndexScanState"; break;
- case T_BitmapHeapScanState: return "BitmapHeapScanState"; break;
- case T_TidScanState: return "TidScanState"; break;
- case T_TidRangeScanState: return "TidRangeScanState"; break;
- case T_SubqueryScanState: return "SubqueryScanState"; break;
- case T_FunctionScanState: return "FunctionScanState"; break;
- case T_TableFuncScanState: return "TableFuncScanState"; break;
- case T_ValuesScanState: return "ValuesScanState"; break;
- case T_CteScanState: return "CteScanState"; break;
- case T_NamedTuplestoreScanState: return "NamedTuplestoreScanState"; break;
- case T_WorkTableScanState: return "WorkTableScanState"; break;
- case T_ForeignScanState: return "ForeignScanState"; break;
- case T_CustomScanState: return "CustomScanState"; break;
- case T_JoinState: return "JoinState"; break;
- case T_NestLoopState: return "NestLoopState"; break;
- case T_MergeJoinState: return "MergeJoinState"; break;
- case T_HashJoinState: return "HashJoinState"; break;
- case T_MaterialState: return "MaterialState"; break;
- case T_MemoizeState: return "MemoizeState"; break;
- case T_SortState: return "SortState"; break;
- case T_IncrementalSortState: return "IncrementalSortState"; break;
- case T_GroupState: return "GroupState"; break;
- case T_AggState: return "AggState"; break;
- case T_WindowAggState: return "WindowAggState"; break;
- case T_UniqueState: return "UniqueState"; break;
- case T_GatherState: return "GatherState"; break;
- case T_GatherMergeState: return "GatherMergeState"; break;
- case T_HashState: return "HashState"; break;
- case T_SetOpState: return "SetOpState"; break;
- case T_LockRowsState: return "LockRowsState"; break;
- case T_LimitState: return "LimitState"; break;
- case T_Alias: return "Alias"; break;
- case T_RangeVar: return "RangeVar"; break;
- case T_TableFunc: return "TableFunc"; break;
- case T_Var: return "Var"; break;
- case T_Const: return "Const"; break;
- case T_Param: return "Param"; break;
- case T_Aggref: return "Aggref"; break;
- case T_GroupingFunc: return "GroupingFunc"; break;
- case T_WindowFunc: return "WindowFunc"; break;
- case T_SubscriptingRef: return "SubscriptingRef"; break;
- case T_FuncExpr: return "FuncExpr"; break;
- case T_NamedArgExpr: return "NamedArgExpr"; break;
- case T_OpExpr: return "OpExpr"; break;
- case T_DistinctExpr: return "DistinctExpr"; break;
- case T_NullIfExpr: return "NullIfExpr"; break;
- case T_ScalarArrayOpExpr: return "ScalarArrayOpExpr"; break;
- case T_BoolExpr: return "BoolExpr"; break;
- case T_SubLink: return "SubLink"; break;
- case T_SubPlan: return "SubPlan"; break;
- case T_AlternativeSubPlan: return "AlternativeSubPlan"; break;
- case T_FieldSelect: return "FieldSelect"; break;
- case T_FieldStore: return "FieldStore"; break;
- case T_RelabelType: return "RelabelType"; break;
- case T_CoerceViaIO: return "CoerceViaIO"; break;
- case T_ArrayCoerceExpr: return "ArrayCoerceExpr"; break;
- case T_ConvertRowtypeExpr: return "ConvertRowtypeExpr"; break;
- case T_CollateExpr: return "CollateExpr"; break;
- case T_CaseExpr: return "CaseExpr"; break;
- case T_CaseWhen: return "CaseWhen"; break;
- case T_CaseTestExpr: return "CaseTestExpr"; break;
- case T_ArrayExpr: return "ArrayExpr"; break;
- case T_RowExpr: return "RowExpr"; break;
- case T_RowCompareExpr: return "RowCompareExpr"; break;
- case T_CoalesceExpr: return "CoalesceExpr"; break;
- case T_MinMaxExpr: return "MinMaxExpr"; break;
- case T_SQLValueFunction: return "SQLValueFunction"; break;
- case T_XmlExpr: return "XmlExpr"; break;
- case T_NullTest: return "NullTest"; break;
- case T_BooleanTest: return "BooleanTest"; break;
- case T_CoerceToDomain: return "CoerceToDomain"; break;
- case T_CoerceToDomainValue: return "CoerceToDomainValue"; break;
- case T_SetToDefault: return "SetToDefault"; break;
- case T_CurrentOfExpr: return "CurrentOfExpr"; break;
- case T_NextValueExpr: return "NextValueExpr"; break;
- case T_InferenceElem: return "InferenceElem"; break;
- case T_TargetEntry: return "TargetEntry"; break;
- case T_RangeTblRef: return "RangeTblRef"; break;
- case T_JoinExpr: return "JoinExpr"; break;
- case T_FromExpr: return "FromExpr"; break;
- case T_OnConflictExpr: return "OnConflictExpr"; break;
- case T_IntoClause: return "IntoClause"; break;
- case T_ExprState: return "ExprState"; break;
- case T_WindowFuncExprState: return "WindowFuncExprState"; break;
- case T_SetExprState: return "SetExprState"; break;
- case T_SubPlanState: return "SubPlanState"; break;
- case T_DomainConstraintState: return "DomainConstraintState"; break;
- case T_PlannerInfo: return "PlannerInfo"; break;
- case T_PlannerGlobal: return "PlannerGlobal"; break;
- case T_RelOptInfo: return "RelOptInfo"; break;
- case T_IndexOptInfo: return "IndexOptInfo"; break;
- case T_ForeignKeyOptInfo: return "ForeignKeyOptInfo"; break;
- case T_ParamPathInfo: return "ParamPathInfo"; break;
- case T_Path: return "Path"; break;
- case T_IndexPath: return "IndexPath"; break;
- case T_BitmapHeapPath: return "BitmapHeapPath"; break;
- case T_BitmapAndPath: return "BitmapAndPath"; break;
- case T_BitmapOrPath: return "BitmapOrPath"; break;
- case T_TidPath: return "TidPath"; break;
- case T_TidRangePath: return "TidRangePath"; break;
- case T_SubqueryScanPath: return "SubqueryScanPath"; break;
- case T_ForeignPath: return "ForeignPath"; break;
- case T_CustomPath: return "CustomPath"; break;
- case T_NestPath: return "NestPath"; break;
- case T_MergePath: return "MergePath"; break;
- case T_HashPath: return "HashPath"; break;
- case T_AppendPath: return "AppendPath"; break;
- case T_MergeAppendPath: return "MergeAppendPath"; break;
- case T_GroupResultPath: return "GroupResultPath"; break;
- case T_MaterialPath: return "MaterialPath"; break;
- case T_MemoizePath: return "MemoizePath"; break;
- case T_UniquePath: return "UniquePath"; break;
- case T_GatherPath: return "GatherPath"; break;
- case T_GatherMergePath: return "GatherMergePath"; break;
- case T_ProjectionPath: return "ProjectionPath"; break;
- case T_ProjectSetPath: return "ProjectSetPath"; break;
- case T_SortPath: return "SortPath"; break;
- case T_IncrementalSortPath: return "IncrementalSortPath"; break;
- case T_GroupPath: return "GroupPath"; break;
- case T_UpperUniquePath: return "UpperUniquePath"; break;
- case T_AggPath: return "AggPath"; break;
- case T_GroupingSetsPath: return "GroupingSetsPath"; break;
- case T_MinMaxAggPath: return "MinMaxAggPath"; break;
- case T_WindowAggPath: return "WindowAggPath"; break;
- case T_SetOpPath: return "SetOpPath"; break;
- case T_RecursiveUnionPath: return "RecursiveUnionPath"; break;
- case T_LockRowsPath: return "LockRowsPath"; break;
- case T_ModifyTablePath: return "ModifyTablePath"; break;
- case T_LimitPath: return "LimitPath"; break;
- case T_EquivalenceClass: return "EquivalenceClass"; break;
- case T_EquivalenceMember: return "EquivalenceMember"; break;
- case T_PathKey: return "PathKey"; break;
- case T_PathTarget: return "PathTarget"; break;
- case T_RestrictInfo: return "RestrictInfo"; break;
- case T_IndexClause: return "IndexClause"; break;
- case T_PlaceHolderVar: return "PlaceHolderVar"; break;
- case T_SpecialJoinInfo: return "SpecialJoinInfo"; break;
- case T_AppendRelInfo: return "AppendRelInfo"; break;
- case T_RowIdentityVarInfo: return "RowIdentityVarInfo"; break;
- case T_PlaceHolderInfo: return "PlaceHolderInfo"; break;
- case T_MinMaxAggInfo: return "MinMaxAggInfo"; break;
- case T_PlannerParamItem: return "PlannerParamItem"; break;
- case T_RollupData: return "RollupData"; break;
- case T_GroupingSetData: return "GroupingSetData"; break;
- case T_StatisticExtInfo: return "StatisticExtInfo"; break;
- case T_AllocSetContext: return "AllocSetContext"; break;
- case T_SlabContext: return "SlabContext"; break;
- case T_GenerationContext: return "GenerationContext"; break;
- case T_Integer: return "Integer"; break;
- case T_Float: return "Float"; break;
- case T_Boolean: return "Boolean"; break;
- case T_String: return "String"; break;
- case T_BitString: return "BitString"; break;
- case T_List: return "List"; break;
- case T_IntList: return "IntList"; break;
- case T_OidList: return "OidList"; break;
- case T_ExtensibleNode: return "ExtensibleNode"; break;
- case T_RawStmt: return "RawStmt"; break;
- case T_Query: return "Query"; break;
- case T_PlannedStmt: return "PlannedStmt"; break;
- case T_InsertStmt: return "InsertStmt"; break;
- case T_DeleteStmt: return "DeleteStmt"; break;
- case T_UpdateStmt: return "UpdateStmt"; break;
- case T_SelectStmt: return "SelectStmt"; break;
- case T_ReturnStmt: return "ReturnStmt"; break;
- case T_PLAssignStmt: return "PLAssignStmt"; break;
- case T_AlterTableStmt: return "AlterTableStmt"; break;
- case T_AlterTableCmd: return "AlterTableCmd"; break;
- case T_AlterDomainStmt: return "AlterDomainStmt"; break;
- case T_SetOperationStmt: return "SetOperationStmt"; break;
- case T_GrantStmt: return "GrantStmt"; break;
- case T_GrantRoleStmt: return "GrantRoleStmt"; break;
- case T_AlterDefaultPrivilegesStmt: return "AlterDefaultPrivilegesStmt"; break;
- case T_ClosePortalStmt: return "ClosePortalStmt"; break;
- case T_ClusterStmt: return "ClusterStmt"; break;
- case T_CopyStmt: return "CopyStmt"; break;
- case T_CreateStmt: return "CreateStmt"; break;
- case T_DefineStmt: return "DefineStmt"; break;
- case T_DropStmt: return "DropStmt"; break;
- case T_TruncateStmt: return "TruncateStmt"; break;
- case T_CommentStmt: return "CommentStmt"; break;
- case T_FetchStmt: return "FetchStmt"; break;
- case T_IndexStmt: return "IndexStmt"; break;
- case T_CreateFunctionStmt: return "CreateFunctionStmt"; break;
- case T_AlterFunctionStmt: return "AlterFunctionStmt"; break;
- case T_DoStmt: return "DoStmt"; break;
- case T_RenameStmt: return "RenameStmt"; break;
- case T_RuleStmt: return "RuleStmt"; break;
- case T_NotifyStmt: return "NotifyStmt"; break;
- case T_ListenStmt: return "ListenStmt"; break;
- case T_UnlistenStmt: return "UnlistenStmt"; break;
- case T_TransactionStmt: return "TransactionStmt"; break;
- case T_ViewStmt: return "ViewStmt"; break;
- case T_LoadStmt: return "LoadStmt"; break;
- case T_CreateDomainStmt: return "CreateDomainStmt"; break;
- case T_CreatedbStmt: return "CreatedbStmt"; break;
- case T_DropdbStmt: return "DropdbStmt"; break;
- case T_VacuumStmt: return "VacuumStmt"; break;
- case T_ExplainStmt: return "ExplainStmt"; break;
- case T_CreateTableAsStmt: return "CreateTableAsStmt"; break;
- case T_CreateSeqStmt: return "CreateSeqStmt"; break;
- case T_AlterSeqStmt: return "AlterSeqStmt"; break;
- case T_VariableSetStmt: return "VariableSetStmt"; break;
- case T_VariableShowStmt: return "VariableShowStmt"; break;
- case T_DiscardStmt: return "DiscardStmt"; break;
- case T_CreateTrigStmt: return "CreateTrigStmt"; break;
- case T_CreatePLangStmt: return "CreatePLangStmt"; break;
- case T_CreateRoleStmt: return "CreateRoleStmt"; break;
- case T_AlterRoleStmt: return "AlterRoleStmt"; break;
- case T_DropRoleStmt: return "DropRoleStmt"; break;
- case T_LockStmt: return "LockStmt"; break;
- case T_ConstraintsSetStmt: return "ConstraintsSetStmt"; break;
- case T_ReindexStmt: return "ReindexStmt"; break;
- case T_CheckPointStmt: return "CheckPointStmt"; break;
- case T_CreateSchemaStmt: return "CreateSchemaStmt"; break;
- case T_AlterDatabaseStmt: return "AlterDatabaseStmt"; break;
- case T_AlterDatabaseRefreshCollStmt: return "AlterDatabaseRefreshCollStmt"; break;
- case T_AlterDatabaseSetStmt: return "AlterDatabaseSetStmt"; break;
- case T_AlterRoleSetStmt: return "AlterRoleSetStmt"; break;
- case T_CreateConversionStmt: return "CreateConversionStmt"; break;
- case T_CreateCastStmt: return "CreateCastStmt"; break;
- case T_CreateOpClassStmt: return "CreateOpClassStmt"; break;
- case T_CreateOpFamilyStmt: return "CreateOpFamilyStmt"; break;
- case T_AlterOpFamilyStmt: return "AlterOpFamilyStmt"; break;
- case T_PrepareStmt: return "PrepareStmt"; break;
- case T_ExecuteStmt: return "ExecuteStmt"; break;
- case T_DeallocateStmt: return "DeallocateStmt"; break;
- case T_DeclareCursorStmt: return "DeclareCursorStmt"; break;
- case T_CreateTableSpaceStmt: return "CreateTableSpaceStmt"; break;
- case T_DropTableSpaceStmt: return "DropTableSpaceStmt"; break;
- case T_AlterObjectDependsStmt: return "AlterObjectDependsStmt"; break;
- case T_AlterObjectSchemaStmt: return "AlterObjectSchemaStmt"; break;
- case T_AlterOwnerStmt: return "AlterOwnerStmt"; break;
- case T_AlterOperatorStmt: return "AlterOperatorStmt"; break;
- case T_AlterTypeStmt: return "AlterTypeStmt"; break;
- case T_DropOwnedStmt: return "DropOwnedStmt"; break;
- case T_ReassignOwnedStmt: return "ReassignOwnedStmt"; break;
- case T_CompositeTypeStmt: return "CompositeTypeStmt"; break;
- case T_CreateEnumStmt: return "CreateEnumStmt"; break;
- case T_CreateRangeStmt: return "CreateRangeStmt"; break;
- case T_AlterEnumStmt: return "AlterEnumStmt"; break;
- case T_AlterTSDictionaryStmt: return "AlterTSDictionaryStmt"; break;
- case T_AlterTSConfigurationStmt: return "AlterTSConfigurationStmt"; break;
- case T_CreateFdwStmt: return "CreateFdwStmt"; break;
- case T_AlterFdwStmt: return "AlterFdwStmt"; break;
- case T_CreateForeignServerStmt: return "CreateForeignServerStmt"; break;
- case T_AlterForeignServerStmt: return "AlterForeignServerStmt"; break;
- case T_CreateUserMappingStmt: return "CreateUserMappingStmt"; break;
- case T_AlterUserMappingStmt: return "AlterUserMappingStmt"; break;
- case T_DropUserMappingStmt: return "DropUserMappingStmt"; break;
- case T_AlterTableSpaceOptionsStmt: return "AlterTableSpaceOptionsStmt"; break;
- case T_AlterTableMoveAllStmt: return "AlterTableMoveAllStmt"; break;
- case T_SecLabelStmt: return "SecLabelStmt"; break;
- case T_CreateForeignTableStmt: return "CreateForeignTableStmt"; break;
- case T_ImportForeignSchemaStmt: return "ImportForeignSchemaStmt"; break;
- case T_CreateExtensionStmt: return "CreateExtensionStmt"; break;
- case T_AlterExtensionStmt: return "AlterExtensionStmt"; break;
- case T_AlterExtensionContentsStmt: return "AlterExtensionContentsStmt"; break;
- case T_CreateEventTrigStmt: return "CreateEventTrigStmt"; break;
- case T_AlterEventTrigStmt: return "AlterEventTrigStmt"; break;
- case T_RefreshMatViewStmt: return "RefreshMatViewStmt"; break;
- case T_ReplicaIdentityStmt: return "ReplicaIdentityStmt"; break;
- case T_AlterSystemStmt: return "AlterSystemStmt"; break;
- case T_CreatePolicyStmt: return "CreatePolicyStmt"; break;
- case T_AlterPolicyStmt: return "AlterPolicyStmt"; break;
- case T_CreateTransformStmt: return "CreateTransformStmt"; break;
- case T_CreateAmStmt: return "CreateAmStmt"; break;
- case T_CreatePublicationStmt: return "CreatePublicationStmt"; break;
- case T_AlterPublicationStmt: return "AlterPublicationStmt"; break;
- case T_CreateSubscriptionStmt: return "CreateSubscriptionStmt"; break;
- case T_AlterSubscriptionStmt: return "AlterSubscriptionStmt"; break;
- case T_DropSubscriptionStmt: return "DropSubscriptionStmt"; break;
- case T_CreateStatsStmt: return "CreateStatsStmt"; break;
- case T_AlterCollationStmt: return "AlterCollationStmt"; break;
- case T_CallStmt: return "CallStmt"; break;
- case T_AlterStatsStmt: return "AlterStatsStmt"; break;
- case T_A_Expr: return "A_Expr"; break;
- case T_ColumnRef: return "ColumnRef"; break;
- case T_ParamRef: return "ParamRef"; break;
- case T_A_Const: return "A_Const"; break;
- case T_FuncCall: return "FuncCall"; break;
- case T_A_Star: return "A_Star"; break;
- case T_A_Indices: return "A_Indices"; break;
- case T_A_Indirection: return "A_Indirection"; break;
- case T_A_ArrayExpr: return "A_ArrayExpr"; break;
- case T_ResTarget: return "ResTarget"; break;
- case T_MultiAssignRef: return "MultiAssignRef"; break;
- case T_TypeCast: return "TypeCast"; break;
- case T_CollateClause: return "CollateClause"; break;
- case T_SortBy: return "SortBy"; break;
- case T_WindowDef: return "WindowDef"; break;
- case T_RangeSubselect: return "RangeSubselect"; break;
- case T_RangeFunction: return "RangeFunction"; break;
- case T_RangeTableSample: return "RangeTableSample"; break;
- case T_RangeTableFunc: return "RangeTableFunc"; break;
- case T_RangeTableFuncCol: return "RangeTableFuncCol"; break;
- case T_TypeName: return "TypeName"; break;
- case T_ColumnDef: return "ColumnDef"; break;
- case T_IndexElem: return "IndexElem"; break;
- case T_StatsElem: return "StatsElem"; break;
- case T_Constraint: return "Constraint"; break;
- case T_DefElem: return "DefElem"; break;
- case T_RangeTblEntry: return "RangeTblEntry"; break;
- case T_RangeTblFunction: return "RangeTblFunction"; break;
- case T_TableSampleClause: return "TableSampleClause"; break;
- case T_WithCheckOption: return "WithCheckOption"; break;
- case T_SortGroupClause: return "SortGroupClause"; break;
- case T_GroupingSet: return "GroupingSet"; break;
- case T_WindowClause: return "WindowClause"; break;
- case T_ObjectWithArgs: return "ObjectWithArgs"; break;
- case T_AccessPriv: return "AccessPriv"; break;
- case T_CreateOpClassItem: return "CreateOpClassItem"; break;
- case T_TableLikeClause: return "TableLikeClause"; break;
- case T_FunctionParameter: return "FunctionParameter"; break;
- case T_LockingClause: return "LockingClause"; break;
- case T_RowMarkClause: return "RowMarkClause"; break;
- case T_XmlSerialize: return "XmlSerialize"; break;
- case T_WithClause: return "WithClause"; break;
- case T_InferClause: return "InferClause"; break;
- case T_OnConflictClause: return "OnConflictClause"; break;
- case T_CTESearchClause: return "CTESearchClause"; break;
- case T_CTECycleClause: return "CTECycleClause"; break;
- case T_CommonTableExpr: return "CommonTableExpr"; break;
- case T_RoleSpec: return "RoleSpec"; break;
- case T_TriggerTransition: return "TriggerTransition"; break;
- case T_PartitionElem: return "PartitionElem"; break;
- case T_PartitionSpec: return "PartitionSpec"; break;
- case T_PartitionBoundSpec: return "PartitionBoundSpec"; break;
- case T_PartitionRangeDatum: return "PartitionRangeDatum"; break;
- case T_PartitionCmd: return "PartitionCmd"; break;
- case T_VacuumRelation: return "VacuumRelation"; break;
- case T_PublicationObjSpec: return "PublicationObjSpec"; break;
- case T_PublicationTable: return "PublicationTable"; break;
- case T_IdentifySystemCmd: return "IdentifySystemCmd"; break;
- case T_BaseBackupCmd: return "BaseBackupCmd"; break;
- case T_CreateReplicationSlotCmd: return "CreateReplicationSlotCmd"; break;
- case T_DropReplicationSlotCmd: return "DropReplicationSlotCmd"; break;
- case T_ReadReplicationSlotCmd: return "ReadReplicationSlotCmd"; break;
- case T_StartReplicationCmd: return "StartReplicationCmd"; break;
- case T_TimeLineHistoryCmd: return "TimeLineHistoryCmd"; break;
- case T_TriggerData: return "TriggerData"; break;
- case T_EventTriggerData: return "EventTriggerData"; break;
- case T_ReturnSetInfo: return "ReturnSetInfo"; break;
- case T_WindowObjectData: return "WindowObjectData"; break;
- case T_TIDBitmap: return "TIDBitmap"; break;
- case T_InlineCodeBlock: return "InlineCodeBlock"; break;
- case T_FdwRoutine: return "FdwRoutine"; break;
- case T_IndexAmRoutine: return "IndexAmRoutine"; break;
- case T_TableAmRoutine: return "TableAmRoutine"; break;
- case T_TsmRoutine: return "TsmRoutine"; break;
- case T_ForeignKeyCacheInfo: return "ForeignKeyCacheInfo"; break;
- case T_CallContext: return "CallContext"; break;
- case T_SupportRequestSimplify: return "SupportRequestSimplify"; break;
- case T_SupportRequestSelectivity: return "SupportRequestSelectivity"; break;
- case T_SupportRequestCost: return "SupportRequestCost"; break;
- case T_SupportRequestRows: return "SupportRequestRows"; break;
- case T_SupportRequestIndexCondition: return "SupportRequestIndexCondition"; break;
+ case T_Invalid:
+ return "Invalid";
+ break;
+ case T_IndexInfo:
+ return "IndexInfo";
+ break;
+ case T_ExprContext:
+ return "ExprContext";
+ break;
+ case T_ProjectionInfo:
+ return "ProjectionInfo";
+ break;
+ case T_JunkFilter:
+ return "JunkFilter";
+ break;
+ case T_OnConflictSetState:
+ return "OnConflictSetState";
+ break;
+ case T_ResultRelInfo:
+ return "ResultRelInfo";
+ break;
+ case T_EState:
+ return "EState";
+ break;
+ case T_TupleTableSlot:
+ return "TupleTableSlot";
+ break;
+ case T_Plan:
+ return "Plan";
+ break;
+ case T_Result:
+ return "Result";
+ break;
+ case T_ProjectSet:
+ return "ProjectSet";
+ break;
+ case T_ModifyTable:
+ return "ModifyTable";
+ break;
+ case T_Append:
+ return "Append";
+ break;
+ case T_MergeAppend:
+ return "MergeAppend";
+ break;
+ case T_RecursiveUnion:
+ return "RecursiveUnion";
+ break;
+ case T_BitmapAnd:
+ return "BitmapAnd";
+ break;
+ case T_BitmapOr:
+ return "BitmapOr";
+ break;
+ case T_Scan:
+ return "Scan";
+ break;
+ case T_SeqScan:
+ return "SeqScan";
+ break;
+ case T_SampleScan:
+ return "SampleScan";
+ break;
+ case T_IndexScan:
+ return "IndexScan";
+ break;
+ case T_IndexOnlyScan:
+ return "IndexOnlyScan";
+ break;
+ case T_BitmapIndexScan:
+ return "BitmapIndexScan";
+ break;
+ case T_BitmapHeapScan:
+ return "BitmapHeapScan";
+ break;
+ case T_TidScan:
+ return "TidScan";
+ break;
+ case T_TidRangeScan:
+ return "TidRangeScan";
+ break;
+ case T_SubqueryScan:
+ return "SubqueryScan";
+ break;
+ case T_FunctionScan:
+ return "FunctionScan";
+ break;
+ case T_ValuesScan:
+ return "ValuesScan";
+ break;
+ case T_TableFuncScan:
+ return "TableFuncScan";
+ break;
+ case T_CteScan:
+ return "CteScan";
+ break;
+ case T_NamedTuplestoreScan:
+ return "NamedTuplestoreScan";
+ break;
+ case T_WorkTableScan:
+ return "WorkTableScan";
+ break;
+ case T_ForeignScan:
+ return "ForeignScan";
+ break;
+ case T_CustomScan:
+ return "CustomScan";
+ break;
+ case T_Join:
+ return "Join";
+ break;
+ case T_NestLoop:
+ return "NestLoop";
+ break;
+ case T_MergeJoin:
+ return "MergeJoin";
+ break;
+ case T_HashJoin:
+ return "HashJoin";
+ break;
+ case T_Material:
+ return "Material";
+ break;
+ case T_Memoize:
+ return "Memoize";
+ break;
+ case T_Sort:
+ return "Sort";
+ break;
+ case T_IncrementalSort:
+ return "IncrementalSort";
+ break;
+ case T_Group:
+ return "Group";
+ break;
+ case T_Agg:
+ return "Agg";
+ break;
+ case T_WindowAgg:
+ return "WindowAgg";
+ break;
+ case T_Unique:
+ return "Unique";
+ break;
+ case T_Gather:
+ return "Gather";
+ break;
+ case T_GatherMerge:
+ return "GatherMerge";
+ break;
+ case T_Hash:
+ return "Hash";
+ break;
+ case T_SetOp:
+ return "SetOp";
+ break;
+ case T_LockRows:
+ return "LockRows";
+ break;
+ case T_Limit:
+ return "Limit";
+ break;
+ case T_NestLoopParam:
+ return "NestLoopParam";
+ break;
+ case T_PlanRowMark:
+ return "PlanRowMark";
+ break;
+ case T_PartitionPruneInfo:
+ return "PartitionPruneInfo";
+ break;
+ case T_PartitionedRelPruneInfo:
+ return "PartitionedRelPruneInfo";
+ break;
+ case T_PartitionPruneStepOp:
+ return "PartitionPruneStepOp";
+ break;
+ case T_PartitionPruneStepCombine:
+ return "PartitionPruneStepCombine";
+ break;
+ case T_PlanInvalItem:
+ return "PlanInvalItem";
+ break;
+ case T_PlanState:
+ return "PlanState";
+ break;
+ case T_ResultState:
+ return "ResultState";
+ break;
+ case T_ProjectSetState:
+ return "ProjectSetState";
+ break;
+ case T_ModifyTableState:
+ return "ModifyTableState";
+ break;
+ case T_AppendState:
+ return "AppendState";
+ break;
+ case T_MergeAppendState:
+ return "MergeAppendState";
+ break;
+ case T_RecursiveUnionState:
+ return "RecursiveUnionState";
+ break;
+ case T_BitmapAndState:
+ return "BitmapAndState";
+ break;
+ case T_BitmapOrState:
+ return "BitmapOrState";
+ break;
+ case T_ScanState:
+ return "ScanState";
+ break;
+ case T_SeqScanState:
+ return "SeqScanState";
+ break;
+ case T_SampleScanState:
+ return "SampleScanState";
+ break;
+ case T_IndexScanState:
+ return "IndexScanState";
+ break;
+ case T_IndexOnlyScanState:
+ return "IndexOnlyScanState";
+ break;
+ case T_BitmapIndexScanState:
+ return "BitmapIndexScanState";
+ break;
+ case T_BitmapHeapScanState:
+ return "BitmapHeapScanState";
+ break;
+ case T_TidScanState:
+ return "TidScanState";
+ break;
+ case T_TidRangeScanState:
+ return "TidRangeScanState";
+ break;
+ case T_SubqueryScanState:
+ return "SubqueryScanState";
+ break;
+ case T_FunctionScanState:
+ return "FunctionScanState";
+ break;
+ case T_TableFuncScanState:
+ return "TableFuncScanState";
+ break;
+ case T_ValuesScanState:
+ return "ValuesScanState";
+ break;
+ case T_CteScanState:
+ return "CteScanState";
+ break;
+ case T_NamedTuplestoreScanState:
+ return "NamedTuplestoreScanState";
+ break;
+ case T_WorkTableScanState:
+ return "WorkTableScanState";
+ break;
+ case T_ForeignScanState:
+ return "ForeignScanState";
+ break;
+ case T_CustomScanState:
+ return "CustomScanState";
+ break;
+ case T_JoinState:
+ return "JoinState";
+ break;
+ case T_NestLoopState:
+ return "NestLoopState";
+ break;
+ case T_MergeJoinState:
+ return "MergeJoinState";
+ break;
+ case T_HashJoinState:
+ return "HashJoinState";
+ break;
+ case T_MaterialState:
+ return "MaterialState";
+ break;
+ case T_MemoizeState:
+ return "MemoizeState";
+ break;
+ case T_SortState:
+ return "SortState";
+ break;
+ case T_IncrementalSortState:
+ return "IncrementalSortState";
+ break;
+ case T_GroupState:
+ return "GroupState";
+ break;
+ case T_AggState:
+ return "AggState";
+ break;
+ case T_WindowAggState:
+ return "WindowAggState";
+ break;
+ case T_UniqueState:
+ return "UniqueState";
+ break;
+ case T_GatherState:
+ return "GatherState";
+ break;
+ case T_GatherMergeState:
+ return "GatherMergeState";
+ break;
+ case T_HashState:
+ return "HashState";
+ break;
+ case T_SetOpState:
+ return "SetOpState";
+ break;
+ case T_LockRowsState:
+ return "LockRowsState";
+ break;
+ case T_LimitState:
+ return "LimitState";
+ break;
+ case T_Alias:
+ return "Alias";
+ break;
+ case T_RangeVar:
+ return "RangeVar";
+ break;
+ case T_TableFunc:
+ return "TableFunc";
+ break;
+ case T_Var:
+ return "Var";
+ break;
+ case T_Const:
+ return "Const";
+ break;
+ case T_Param:
+ return "Param";
+ break;
+ case T_Aggref:
+ return "Aggref";
+ break;
+ case T_GroupingFunc:
+ return "GroupingFunc";
+ break;
+ case T_WindowFunc:
+ return "WindowFunc";
+ break;
+ case T_SubscriptingRef:
+ return "SubscriptingRef";
+ break;
+ case T_FuncExpr:
+ return "FuncExpr";
+ break;
+ case T_NamedArgExpr:
+ return "NamedArgExpr";
+ break;
+ case T_OpExpr:
+ return "OpExpr";
+ break;
+ case T_DistinctExpr:
+ return "DistinctExpr";
+ break;
+ case T_NullIfExpr:
+ return "NullIfExpr";
+ break;
+ case T_ScalarArrayOpExpr:
+ return "ScalarArrayOpExpr";
+ break;
+ case T_BoolExpr:
+ return "BoolExpr";
+ break;
+ case T_SubLink:
+ return "SubLink";
+ break;
+ case T_SubPlan:
+ return "SubPlan";
+ break;
+ case T_AlternativeSubPlan:
+ return "AlternativeSubPlan";
+ break;
+ case T_FieldSelect:
+ return "FieldSelect";
+ break;
+ case T_FieldStore:
+ return "FieldStore";
+ break;
+ case T_RelabelType:
+ return "RelabelType";
+ break;
+ case T_CoerceViaIO:
+ return "CoerceViaIO";
+ break;
+ case T_ArrayCoerceExpr:
+ return "ArrayCoerceExpr";
+ break;
+ case T_ConvertRowtypeExpr:
+ return "ConvertRowtypeExpr";
+ break;
+ case T_CollateExpr:
+ return "CollateExpr";
+ break;
+ case T_CaseExpr:
+ return "CaseExpr";
+ break;
+ case T_CaseWhen:
+ return "CaseWhen";
+ break;
+ case T_CaseTestExpr:
+ return "CaseTestExpr";
+ break;
+ case T_ArrayExpr:
+ return "ArrayExpr";
+ break;
+ case T_RowExpr:
+ return "RowExpr";
+ break;
+ case T_RowCompareExpr:
+ return "RowCompareExpr";
+ break;
+ case T_CoalesceExpr:
+ return "CoalesceExpr";
+ break;
+ case T_MinMaxExpr:
+ return "MinMaxExpr";
+ break;
+ case T_SQLValueFunction:
+ return "SQLValueFunction";
+ break;
+ case T_XmlExpr:
+ return "XmlExpr";
+ break;
+ case T_NullTest:
+ return "NullTest";
+ break;
+ case T_BooleanTest:
+ return "BooleanTest";
+ break;
+ case T_CoerceToDomain:
+ return "CoerceToDomain";
+ break;
+ case T_CoerceToDomainValue:
+ return "CoerceToDomainValue";
+ break;
+ case T_SetToDefault:
+ return "SetToDefault";
+ break;
+ case T_CurrentOfExpr:
+ return "CurrentOfExpr";
+ break;
+ case T_NextValueExpr:
+ return "NextValueExpr";
+ break;
+ case T_InferenceElem:
+ return "InferenceElem";
+ break;
+ case T_TargetEntry:
+ return "TargetEntry";
+ break;
+ case T_RangeTblRef:
+ return "RangeTblRef";
+ break;
+ case T_JoinExpr:
+ return "JoinExpr";
+ break;
+ case T_FromExpr:
+ return "FromExpr";
+ break;
+ case T_OnConflictExpr:
+ return "OnConflictExpr";
+ break;
+ case T_IntoClause:
+ return "IntoClause";
+ break;
+ case T_ExprState:
+ return "ExprState";
+ break;
+ case T_WindowFuncExprState:
+ return "WindowFuncExprState";
+ break;
+ case T_SetExprState:
+ return "SetExprState";
+ break;
+ case T_SubPlanState:
+ return "SubPlanState";
+ break;
+ case T_DomainConstraintState:
+ return "DomainConstraintState";
+ break;
+ case T_PlannerInfo:
+ return "PlannerInfo";
+ break;
+ case T_PlannerGlobal:
+ return "PlannerGlobal";
+ break;
+ case T_RelOptInfo:
+ return "RelOptInfo";
+ break;
+ case T_IndexOptInfo:
+ return "IndexOptInfo";
+ break;
+ case T_ForeignKeyOptInfo:
+ return "ForeignKeyOptInfo";
+ break;
+ case T_ParamPathInfo:
+ return "ParamPathInfo";
+ break;
+ case T_Path:
+ return "Path";
+ break;
+ case T_IndexPath:
+ return "IndexPath";
+ break;
+ case T_BitmapHeapPath:
+ return "BitmapHeapPath";
+ break;
+ case T_BitmapAndPath:
+ return "BitmapAndPath";
+ break;
+ case T_BitmapOrPath:
+ return "BitmapOrPath";
+ break;
+ case T_TidPath:
+ return "TidPath";
+ break;
+ case T_TidRangePath:
+ return "TidRangePath";
+ break;
+ case T_SubqueryScanPath:
+ return "SubqueryScanPath";
+ break;
+ case T_ForeignPath:
+ return "ForeignPath";
+ break;
+ case T_CustomPath:
+ return "CustomPath";
+ break;
+ case T_NestPath:
+ return "NestPath";
+ break;
+ case T_MergePath:
+ return "MergePath";
+ break;
+ case T_HashPath:
+ return "HashPath";
+ break;
+ case T_AppendPath:
+ return "AppendPath";
+ break;
+ case T_MergeAppendPath:
+ return "MergeAppendPath";
+ break;
+ case T_GroupResultPath:
+ return "GroupResultPath";
+ break;
+ case T_MaterialPath:
+ return "MaterialPath";
+ break;
+ case T_MemoizePath:
+ return "MemoizePath";
+ break;
+ case T_UniquePath:
+ return "UniquePath";
+ break;
+ case T_GatherPath:
+ return "GatherPath";
+ break;
+ case T_GatherMergePath:
+ return "GatherMergePath";
+ break;
+ case T_ProjectionPath:
+ return "ProjectionPath";
+ break;
+ case T_ProjectSetPath:
+ return "ProjectSetPath";
+ break;
+ case T_SortPath:
+ return "SortPath";
+ break;
+ case T_IncrementalSortPath:
+ return "IncrementalSortPath";
+ break;
+ case T_GroupPath:
+ return "GroupPath";
+ break;
+ case T_UpperUniquePath:
+ return "UpperUniquePath";
+ break;
+ case T_AggPath:
+ return "AggPath";
+ break;
+ case T_GroupingSetsPath:
+ return "GroupingSetsPath";
+ break;
+ case T_MinMaxAggPath:
+ return "MinMaxAggPath";
+ break;
+ case T_WindowAggPath:
+ return "WindowAggPath";
+ break;
+ case T_SetOpPath:
+ return "SetOpPath";
+ break;
+ case T_RecursiveUnionPath:
+ return "RecursiveUnionPath";
+ break;
+ case T_LockRowsPath:
+ return "LockRowsPath";
+ break;
+ case T_ModifyTablePath:
+ return "ModifyTablePath";
+ break;
+ case T_LimitPath:
+ return "LimitPath";
+ break;
+ case T_EquivalenceClass:
+ return "EquivalenceClass";
+ break;
+ case T_EquivalenceMember:
+ return "EquivalenceMember";
+ break;
+ case T_PathKey:
+ return "PathKey";
+ break;
+ case T_PathTarget:
+ return "PathTarget";
+ break;
+ case T_RestrictInfo:
+ return "RestrictInfo";
+ break;
+ case T_IndexClause:
+ return "IndexClause";
+ break;
+ case T_PlaceHolderVar:
+ return "PlaceHolderVar";
+ break;
+ case T_SpecialJoinInfo:
+ return "SpecialJoinInfo";
+ break;
+ case T_AppendRelInfo:
+ return "AppendRelInfo";
+ break;
+ case T_RowIdentityVarInfo:
+ return "RowIdentityVarInfo";
+ break;
+ case T_PlaceHolderInfo:
+ return "PlaceHolderInfo";
+ break;
+ case T_MinMaxAggInfo:
+ return "MinMaxAggInfo";
+ break;
+ case T_PlannerParamItem:
+ return "PlannerParamItem";
+ break;
+ case T_RollupData:
+ return "RollupData";
+ break;
+ case T_GroupingSetData:
+ return "GroupingSetData";
+ break;
+ case T_StatisticExtInfo:
+ return "StatisticExtInfo";
+ break;
+ case T_AllocSetContext:
+ return "AllocSetContext";
+ break;
+ case T_SlabContext:
+ return "SlabContext";
+ break;
+ case T_GenerationContext:
+ return "GenerationContext";
+ break;
+ case T_Integer:
+ return "Integer";
+ break;
+ case T_Float:
+ return "Float";
+ break;
+ case T_Boolean:
+ return "Boolean";
+ break;
+ case T_String:
+ return "String";
+ break;
+ case T_BitString:
+ return "BitString";
+ break;
+ case T_List:
+ return "List";
+ break;
+ case T_IntList:
+ return "IntList";
+ break;
+ case T_OidList:
+ return "OidList";
+ break;
+ case T_ExtensibleNode:
+ return "ExtensibleNode";
+ break;
+ case T_RawStmt:
+ return "RawStmt";
+ break;
+ case T_Query:
+ return "Query";
+ break;
+ case T_PlannedStmt:
+ return "PlannedStmt";
+ break;
+ case T_InsertStmt:
+ return "InsertStmt";
+ break;
+ case T_DeleteStmt:
+ return "DeleteStmt";
+ break;
+ case T_UpdateStmt:
+ return "UpdateStmt";
+ break;
+ case T_SelectStmt:
+ return "SelectStmt";
+ break;
+ case T_ReturnStmt:
+ return "ReturnStmt";
+ break;
+ case T_PLAssignStmt:
+ return "PLAssignStmt";
+ break;
+ case T_AlterTableStmt:
+ return "AlterTableStmt";
+ break;
+ case T_AlterTableCmd:
+ return "AlterTableCmd";
+ break;
+ case T_AlterDomainStmt:
+ return "AlterDomainStmt";
+ break;
+ case T_SetOperationStmt:
+ return "SetOperationStmt";
+ break;
+ case T_GrantStmt:
+ return "GrantStmt";
+ break;
+ case T_GrantRoleStmt:
+ return "GrantRoleStmt";
+ break;
+ case T_AlterDefaultPrivilegesStmt:
+ return "AlterDefaultPrivilegesStmt";
+ break;
+ case T_ClosePortalStmt:
+ return "ClosePortalStmt";
+ break;
+ case T_ClusterStmt:
+ return "ClusterStmt";
+ break;
+ case T_CopyStmt:
+ return "CopyStmt";
+ break;
+ case T_CreateStmt:
+ return "CreateStmt";
+ break;
+ case T_DefineStmt:
+ return "DefineStmt";
+ break;
+ case T_DropStmt:
+ return "DropStmt";
+ break;
+ case T_TruncateStmt:
+ return "TruncateStmt";
+ break;
+ case T_CommentStmt:
+ return "CommentStmt";
+ break;
+ case T_FetchStmt:
+ return "FetchStmt";
+ break;
+ case T_IndexStmt:
+ return "IndexStmt";
+ break;
+ case T_CreateFunctionStmt:
+ return "CreateFunctionStmt";
+ break;
+ case T_AlterFunctionStmt:
+ return "AlterFunctionStmt";
+ break;
+ case T_DoStmt:
+ return "DoStmt";
+ break;
+ case T_RenameStmt:
+ return "RenameStmt";
+ break;
+ case T_RuleStmt:
+ return "RuleStmt";
+ break;
+ case T_NotifyStmt:
+ return "NotifyStmt";
+ break;
+ case T_ListenStmt:
+ return "ListenStmt";
+ break;
+ case T_UnlistenStmt:
+ return "UnlistenStmt";
+ break;
+ case T_TransactionStmt:
+ return "TransactionStmt";
+ break;
+ case T_ViewStmt:
+ return "ViewStmt";
+ break;
+ case T_LoadStmt:
+ return "LoadStmt";
+ break;
+ case T_CreateDomainStmt:
+ return "CreateDomainStmt";
+ break;
+ case T_CreatedbStmt:
+ return "CreatedbStmt";
+ break;
+ case T_DropdbStmt:
+ return "DropdbStmt";
+ break;
+ case T_VacuumStmt:
+ return "VacuumStmt";
+ break;
+ case T_ExplainStmt:
+ return "ExplainStmt";
+ break;
+ case T_CreateTableAsStmt:
+ return "CreateTableAsStmt";
+ break;
+ case T_CreateSeqStmt:
+ return "CreateSeqStmt";
+ break;
+ case T_AlterSeqStmt:
+ return "AlterSeqStmt";
+ break;
+ case T_VariableSetStmt:
+ return "VariableSetStmt";
+ break;
+ case T_VariableShowStmt:
+ return "VariableShowStmt";
+ break;
+ case T_DiscardStmt:
+ return "DiscardStmt";
+ break;
+ case T_CreateTrigStmt:
+ return "CreateTrigStmt";
+ break;
+ case T_CreatePLangStmt:
+ return "CreatePLangStmt";
+ break;
+ case T_CreateRoleStmt:
+ return "CreateRoleStmt";
+ break;
+ case T_AlterRoleStmt:
+ return "AlterRoleStmt";
+ break;
+ case T_DropRoleStmt:
+ return "DropRoleStmt";
+ break;
+ case T_LockStmt:
+ return "LockStmt";
+ break;
+ case T_ConstraintsSetStmt:
+ return "ConstraintsSetStmt";
+ break;
+ case T_ReindexStmt:
+ return "ReindexStmt";
+ break;
+ case T_CheckPointStmt:
+ return "CheckPointStmt";
+ break;
+ case T_CreateSchemaStmt:
+ return "CreateSchemaStmt";
+ break;
+ case T_AlterDatabaseStmt:
+ return "AlterDatabaseStmt";
+ break;
+ case T_AlterDatabaseRefreshCollStmt:
+ return "AlterDatabaseRefreshCollStmt";
+ break;
+ case T_AlterDatabaseSetStmt:
+ return "AlterDatabaseSetStmt";
+ break;
+ case T_AlterRoleSetStmt:
+ return "AlterRoleSetStmt";
+ break;
+ case T_CreateConversionStmt:
+ return "CreateConversionStmt";
+ break;
+ case T_CreateCastStmt:
+ return "CreateCastStmt";
+ break;
+ case T_CreateOpClassStmt:
+ return "CreateOpClassStmt";
+ break;
+ case T_CreateOpFamilyStmt:
+ return "CreateOpFamilyStmt";
+ break;
+ case T_AlterOpFamilyStmt:
+ return "AlterOpFamilyStmt";
+ break;
+ case T_PrepareStmt:
+ return "PrepareStmt";
+ break;
+ case T_ExecuteStmt:
+ return "ExecuteStmt";
+ break;
+ case T_DeallocateStmt:
+ return "DeallocateStmt";
+ break;
+ case T_DeclareCursorStmt:
+ return "DeclareCursorStmt";
+ break;
+ case T_CreateTableSpaceStmt:
+ return "CreateTableSpaceStmt";
+ break;
+ case T_DropTableSpaceStmt:
+ return "DropTableSpaceStmt";
+ break;
+ case T_AlterObjectDependsStmt:
+ return "AlterObjectDependsStmt";
+ break;
+ case T_AlterObjectSchemaStmt:
+ return "AlterObjectSchemaStmt";
+ break;
+ case T_AlterOwnerStmt:
+ return "AlterOwnerStmt";
+ break;
+ case T_AlterOperatorStmt:
+ return "AlterOperatorStmt";
+ break;
+ case T_AlterTypeStmt:
+ return "AlterTypeStmt";
+ break;
+ case T_DropOwnedStmt:
+ return "DropOwnedStmt";
+ break;
+ case T_ReassignOwnedStmt:
+ return "ReassignOwnedStmt";
+ break;
+ case T_CompositeTypeStmt:
+ return "CompositeTypeStmt";
+ break;
+ case T_CreateEnumStmt:
+ return "CreateEnumStmt";
+ break;
+ case T_CreateRangeStmt:
+ return "CreateRangeStmt";
+ break;
+ case T_AlterEnumStmt:
+ return "AlterEnumStmt";
+ break;
+ case T_AlterTSDictionaryStmt:
+ return "AlterTSDictionaryStmt";
+ break;
+ case T_AlterTSConfigurationStmt:
+ return "AlterTSConfigurationStmt";
+ break;
+ case T_CreateFdwStmt:
+ return "CreateFdwStmt";
+ break;
+ case T_AlterFdwStmt:
+ return "AlterFdwStmt";
+ break;
+ case T_CreateForeignServerStmt:
+ return "CreateForeignServerStmt";
+ break;
+ case T_AlterForeignServerStmt:
+ return "AlterForeignServerStmt";
+ break;
+ case T_CreateUserMappingStmt:
+ return "CreateUserMappingStmt";
+ break;
+ case T_AlterUserMappingStmt:
+ return "AlterUserMappingStmt";
+ break;
+ case T_DropUserMappingStmt:
+ return "DropUserMappingStmt";
+ break;
+ case T_AlterTableSpaceOptionsStmt:
+ return "AlterTableSpaceOptionsStmt";
+ break;
+ case T_AlterTableMoveAllStmt:
+ return "AlterTableMoveAllStmt";
+ break;
+ case T_SecLabelStmt:
+ return "SecLabelStmt";
+ break;
+ case T_CreateForeignTableStmt:
+ return "CreateForeignTableStmt";
+ break;
+ case T_ImportForeignSchemaStmt:
+ return "ImportForeignSchemaStmt";
+ break;
+ case T_CreateExtensionStmt:
+ return "CreateExtensionStmt";
+ break;
+ case T_AlterExtensionStmt:
+ return "AlterExtensionStmt";
+ break;
+ case T_AlterExtensionContentsStmt:
+ return "AlterExtensionContentsStmt";
+ break;
+ case T_CreateEventTrigStmt:
+ return "CreateEventTrigStmt";
+ break;
+ case T_AlterEventTrigStmt:
+ return "AlterEventTrigStmt";
+ break;
+ case T_RefreshMatViewStmt:
+ return "RefreshMatViewStmt";
+ break;
+ case T_ReplicaIdentityStmt:
+ return "ReplicaIdentityStmt";
+ break;
+ case T_AlterSystemStmt:
+ return "AlterSystemStmt";
+ break;
+ case T_CreatePolicyStmt:
+ return "CreatePolicyStmt";
+ break;
+ case T_AlterPolicyStmt:
+ return "AlterPolicyStmt";
+ break;
+ case T_CreateTransformStmt:
+ return "CreateTransformStmt";
+ break;
+ case T_CreateAmStmt:
+ return "CreateAmStmt";
+ break;
+ case T_CreatePublicationStmt:
+ return "CreatePublicationStmt";
+ break;
+ case T_AlterPublicationStmt:
+ return "AlterPublicationStmt";
+ break;
+ case T_CreateSubscriptionStmt:
+ return "CreateSubscriptionStmt";
+ break;
+ case T_AlterSubscriptionStmt:
+ return "AlterSubscriptionStmt";
+ break;
+ case T_DropSubscriptionStmt:
+ return "DropSubscriptionStmt";
+ break;
+ case T_CreateStatsStmt:
+ return "CreateStatsStmt";
+ break;
+ case T_AlterCollationStmt:
+ return "AlterCollationStmt";
+ break;
+ case T_CallStmt:
+ return "CallStmt";
+ break;
+ case T_AlterStatsStmt:
+ return "AlterStatsStmt";
+ break;
+ case T_A_Expr:
+ return "A_Expr";
+ break;
+ case T_ColumnRef:
+ return "ColumnRef";
+ break;
+ case T_ParamRef:
+ return "ParamRef";
+ break;
+ case T_A_Const:
+ return "A_Const";
+ break;
+ case T_FuncCall:
+ return "FuncCall";
+ break;
+ case T_A_Star:
+ return "A_Star";
+ break;
+ case T_A_Indices:
+ return "A_Indices";
+ break;
+ case T_A_Indirection:
+ return "A_Indirection";
+ break;
+ case T_A_ArrayExpr:
+ return "A_ArrayExpr";
+ break;
+ case T_ResTarget:
+ return "ResTarget";
+ break;
+ case T_MultiAssignRef:
+ return "MultiAssignRef";
+ break;
+ case T_TypeCast:
+ return "TypeCast";
+ break;
+ case T_CollateClause:
+ return "CollateClause";
+ break;
+ case T_SortBy:
+ return "SortBy";
+ break;
+ case T_WindowDef:
+ return "WindowDef";
+ break;
+ case T_RangeSubselect:
+ return "RangeSubselect";
+ break;
+ case T_RangeFunction:
+ return "RangeFunction";
+ break;
+ case T_RangeTableSample:
+ return "RangeTableSample";
+ break;
+ case T_RangeTableFunc:
+ return "RangeTableFunc";
+ break;
+ case T_RangeTableFuncCol:
+ return "RangeTableFuncCol";
+ break;
+ case T_TypeName:
+ return "TypeName";
+ break;
+ case T_ColumnDef:
+ return "ColumnDef";
+ break;
+ case T_IndexElem:
+ return "IndexElem";
+ break;
+ case T_StatsElem:
+ return "StatsElem";
+ break;
+ case T_Constraint:
+ return "Constraint";
+ break;
+ case T_DefElem:
+ return "DefElem";
+ break;
+ case T_RangeTblEntry:
+ return "RangeTblEntry";
+ break;
+ case T_RangeTblFunction:
+ return "RangeTblFunction";
+ break;
+ case T_TableSampleClause:
+ return "TableSampleClause";
+ break;
+ case T_WithCheckOption:
+ return "WithCheckOption";
+ break;
+ case T_SortGroupClause:
+ return "SortGroupClause";
+ break;
+ case T_GroupingSet:
+ return "GroupingSet";
+ break;
+ case T_WindowClause:
+ return "WindowClause";
+ break;
+ case T_ObjectWithArgs:
+ return "ObjectWithArgs";
+ break;
+ case T_AccessPriv:
+ return "AccessPriv";
+ break;
+ case T_CreateOpClassItem:
+ return "CreateOpClassItem";
+ break;
+ case T_TableLikeClause:
+ return "TableLikeClause";
+ break;
+ case T_FunctionParameter:
+ return "FunctionParameter";
+ break;
+ case T_LockingClause:
+ return "LockingClause";
+ break;
+ case T_RowMarkClause:
+ return "RowMarkClause";
+ break;
+ case T_XmlSerialize:
+ return "XmlSerialize";
+ break;
+ case T_WithClause:
+ return "WithClause";
+ break;
+ case T_InferClause:
+ return "InferClause";
+ break;
+ case T_OnConflictClause:
+ return "OnConflictClause";
+ break;
+ case T_CTESearchClause:
+ return "CTESearchClause";
+ break;
+ case T_CTECycleClause:
+ return "CTECycleClause";
+ break;
+ case T_CommonTableExpr:
+ return "CommonTableExpr";
+ break;
+ case T_RoleSpec:
+ return "RoleSpec";
+ break;
+ case T_TriggerTransition:
+ return "TriggerTransition";
+ break;
+ case T_PartitionElem:
+ return "PartitionElem";
+ break;
+ case T_PartitionSpec:
+ return "PartitionSpec";
+ break;
+ case T_PartitionBoundSpec:
+ return "PartitionBoundSpec";
+ break;
+ case T_PartitionRangeDatum:
+ return "PartitionRangeDatum";
+ break;
+ case T_PartitionCmd:
+ return "PartitionCmd";
+ break;
+ case T_VacuumRelation:
+ return "VacuumRelation";
+ break;
+ case T_PublicationObjSpec:
+ return "PublicationObjSpec";
+ break;
+ case T_PublicationTable:
+ return "PublicationTable";
+ break;
+ case T_IdentifySystemCmd:
+ return "IdentifySystemCmd";
+ break;
+ case T_BaseBackupCmd:
+ return "BaseBackupCmd";
+ break;
+ case T_CreateReplicationSlotCmd:
+ return "CreateReplicationSlotCmd";
+ break;
+ case T_DropReplicationSlotCmd:
+ return "DropReplicationSlotCmd";
+ break;
+ case T_ReadReplicationSlotCmd:
+ return "ReadReplicationSlotCmd";
+ break;
+ case T_StartReplicationCmd:
+ return "StartReplicationCmd";
+ break;
+ case T_TimeLineHistoryCmd:
+ return "TimeLineHistoryCmd";
+ break;
+ case T_TriggerData:
+ return "TriggerData";
+ break;
+ case T_EventTriggerData:
+ return "EventTriggerData";
+ break;
+ case T_ReturnSetInfo:
+ return "ReturnSetInfo";
+ break;
+ case T_WindowObjectData:
+ return "WindowObjectData";
+ break;
+ case T_TIDBitmap:
+ return "TIDBitmap";
+ break;
+ case T_InlineCodeBlock:
+ return "InlineCodeBlock";
+ break;
+ case T_FdwRoutine:
+ return "FdwRoutine";
+ break;
+ case T_IndexAmRoutine:
+ return "IndexAmRoutine";
+ break;
+ case T_TableAmRoutine:
+ return "TableAmRoutine";
+ break;
+ case T_TsmRoutine:
+ return "TsmRoutine";
+ break;
+ case T_ForeignKeyCacheInfo:
+ return "ForeignKeyCacheInfo";
+ break;
+ case T_CallContext:
+ return "CallContext";
+ break;
+ case T_SupportRequestSimplify:
+ return "SupportRequestSimplify";
+ break;
+ case T_SupportRequestSelectivity:
+ return "SupportRequestSelectivity";
+ break;
+ case T_SupportRequestCost:
+ return "SupportRequestCost";
+ break;
+ case T_SupportRequestRows:
+ return "SupportRequestRows";
+ break;
+ case T_SupportRequestIndexCondition:
+ return "SupportRequestIndexCondition";
+ break;
default:
break;
}
@@ -928,45 +1792,46 @@ accesstype_arg_to_string(ObjectAccessType access, void *arg)
{
case OAT_POST_CREATE:
{
- ObjectAccessPostCreate *pc_arg = (ObjectAccessPostCreate *)arg;
+ ObjectAccessPostCreate *pc_arg = (ObjectAccessPostCreate *) arg;
+
return pstrdup(pc_arg->is_internal ? "internal" : "explicit");
}
break;
case OAT_DROP:
{
- ObjectAccessDrop *drop_arg = (ObjectAccessDrop *)arg;
+ ObjectAccessDrop *drop_arg = (ObjectAccessDrop *) arg;
return psprintf("%s%s%s%s%s%s",
- ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
- ? "internal action," : ""),
- ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
- ? "concurrent drop," : ""),
- ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
- ? "suppress notices," : ""),
- ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
- ? "keep original object," : ""),
- ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
- ? "keep extensions," : ""),
- ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
- ? "normal concurrent drop," : ""));
+ ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
+ ? "internal action," : ""),
+ ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
+ ? "concurrent drop," : ""),
+ ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
+ ? "suppress notices," : ""),
+ ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
+ ? "keep original object," : ""),
+ ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
+ ? "keep extensions," : ""),
+ ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
+ ? "normal concurrent drop," : ""));
}
break;
case OAT_POST_ALTER:
{
- ObjectAccessPostAlter *pa_arg = (ObjectAccessPostAlter*)arg;
+ ObjectAccessPostAlter *pa_arg = (ObjectAccessPostAlter *) arg;
return psprintf("%s %s auxiliary object",
- (pa_arg->is_internal ? "internal" : "explicit"),
- (OidIsValid(pa_arg->auxiliary_id) ? "with" : "without"));
+ (pa_arg->is_internal ? "internal" : "explicit"),
+ (OidIsValid(pa_arg->auxiliary_id) ? "with" : "without"));
}
break;
case OAT_NAMESPACE_SEARCH:
{
- ObjectAccessNamespaceSearch *ns_arg = (ObjectAccessNamespaceSearch *)arg;
+ ObjectAccessNamespaceSearch *ns_arg = (ObjectAccessNamespaceSearch *) arg;
return psprintf("%s, %s",
- (ns_arg->ereport_on_violation ? "report on violation" : "no report on violation"),
- (ns_arg->result ? "allowed" : "denied"));
+ (ns_arg->ereport_on_violation ? "report on violation" : "no report on violation"),
+ (ns_arg->result ? "allowed" : "denied"));
}
break;
case OAT_TRUNCATE:
diff --git a/src/test/modules/test_pg_dump/t/001_base.pl b/src/test/modules/test_pg_dump/t/001_base.pl
index d842f934a3a..f5da6bf46d6 100644
--- a/src/test/modules/test_pg_dump/t/001_base.pl
+++ b/src/test/modules/test_pg_dump/t/001_base.pl
@@ -8,7 +8,7 @@ use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
-my $tempdir = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
###############################################################
# This structure is based off of the src/bin/pg_dump/t test
diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm
index 9a2ada0a103..f842be1a72b 100644
--- a/src/test/perl/PostgreSQL/Test/Cluster.pm
+++ b/src/test/perl/PostgreSQL/Test/Cluster.pm
@@ -135,8 +135,8 @@ INIT
$test_pghost = PostgreSQL::Test::Utils::tempdir_short;
$test_pghost =~ s!\\!/!g if $PostgreSQL::Test::Utils::windows_os;
}
- $ENV{PGHOST} = $test_pghost;
- $ENV{PGDATABASE} = 'postgres';
+ $ENV{PGHOST} = $test_pghost;
+ $ENV{PGDATABASE} = 'postgres';
# Tracking of last port value assigned to accelerate free port lookup.
$last_port_assigned = int(rand() * 16384) + 49152;
@@ -409,8 +409,10 @@ sub set_replication_conf
or croak "set_replication_conf only works with the default host";
open my $hba, '>>', "$pgdata/pg_hba.conf";
- print $hba "\n# Allow replication (set up by PostgreSQL::Test::Cluster.pm)\n";
- if ($PostgreSQL::Test::Utils::windows_os && !$PostgreSQL::Test::Utils::use_unix_sockets)
+ print $hba
+ "\n# Allow replication (set up by PostgreSQL::Test::Cluster.pm)\n";
+ if ($PostgreSQL::Test::Utils::windows_os
+ && !$PostgreSQL::Test::Utils::use_unix_sockets)
{
print $hba
"host replication all $test_localhost/32 sspi include_realm=1 map=regress\n";
@@ -459,10 +461,10 @@ sub init
mkdir $self->backup_dir;
mkdir $self->archive_dir;
- PostgreSQL::Test::Utils::system_or_bail('initdb', '-D', $pgdata, '-A', 'trust', '-N',
- @{ $params{extra} });
- PostgreSQL::Test::Utils::system_or_bail($ENV{PG_REGRESS}, '--config-auth', $pgdata,
- @{ $params{auth_extra} });
+ PostgreSQL::Test::Utils::system_or_bail('initdb', '-D', $pgdata, '-A',
+ 'trust', '-N', @{ $params{extra} });
+ PostgreSQL::Test::Utils::system_or_bail($ENV{PG_REGRESS},
+ '--config-auth', $pgdata, @{ $params{auth_extra} });
open my $conf, '>>', "$pgdata/postgresql.conf";
print $conf "\n# Added by PostgreSQL::Test::Cluster.pm\n";
@@ -575,7 +577,7 @@ sub adjust_conf
my $conffile = $self->data_dir . '/' . $filename;
my $contents = PostgreSQL::Test::Utils::slurp_file($conffile);
- my @lines = split(/\n/, $contents);
+ my @lines = split(/\n/, $contents);
my @result;
my $eq = $skip_equals ? '' : '= ';
foreach my $line (@lines)
@@ -809,8 +811,10 @@ sub start
# sub init) so that it does not get copied to standbys.
# -w is now the default but having it here does no harm and helps
# compatibility with older versions.
- $ret = PostgreSQL::Test::Utils::system_log('pg_ctl', '-w', '-D', $self->data_dir, '-l',
- $self->logfile, '-o', "--cluster-name=$name", 'start');
+ $ret = PostgreSQL::Test::Utils::system_log(
+ 'pg_ctl', '-w', '-D', $self->data_dir,
+ '-l', $self->logfile, '-o', "--cluster-name=$name",
+ 'start');
if ($ret != 0)
{
@@ -919,7 +923,8 @@ sub reload
local %ENV = $self->_get_env();
print "### Reloading node \"$name\"\n";
- PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata, 'reload');
+ PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata,
+ 'reload');
return;
}
@@ -945,8 +950,8 @@ sub restart
# -w is now the default but having it here does no harm and helps
# compatibility with older versions.
- PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-w', '-D', $pgdata, '-l', $logfile,
- 'restart');
+ PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-w', '-D', $pgdata,
+ '-l', $logfile, 'restart');
$self->_update_pid(1);
return;
@@ -971,8 +976,8 @@ sub promote
local %ENV = $self->_get_env();
print "### Promoting node \"$name\"\n";
- PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata, '-l', $logfile,
- 'promote');
+ PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata, '-l',
+ $logfile, 'promote');
return;
}
@@ -995,8 +1000,8 @@ sub logrotate
local %ENV = $self->_get_env();
print "### Rotating log in node \"$name\"\n";
- PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata, '-l', $logfile,
- 'logrotate');
+ PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata, '-l',
+ $logfile, 'logrotate');
return;
}
@@ -1232,13 +1237,16 @@ sub new
my $testname = basename($0);
$testname =~ s/\.[^.]+$//;
my $node = {
- _port => $port,
- _host => $host,
- _basedir => "$PostgreSQL::Test::Utils::tmp_check/t_${testname}_${name}_data",
- _name => $name,
+ _port => $port,
+ _host => $host,
+ _basedir =>
+ "$PostgreSQL::Test::Utils::tmp_check/t_${testname}_${name}_data",
+ _name => $name,
_logfile_generation => 0,
- _logfile_base => "$PostgreSQL::Test::Utils::log_path/${testname}_${name}",
- _logfile => "$PostgreSQL::Test::Utils::log_path/${testname}_${name}.log"
+ _logfile_base =>
+ "$PostgreSQL::Test::Utils::log_path/${testname}_${name}",
+ _logfile =>
+ "$PostgreSQL::Test::Utils::log_path/${testname}_${name}.log"
};
if ($params{install_path})
@@ -1261,8 +1269,8 @@ sub new
# isn't fully compatible. Warn if the version is too old and thus we don't
# have a subclass of this class.
if (ref $ver && $ver < $min_compat)
- {
- my $maj = $ver->major(separator => '_');
+ {
+ my $maj = $ver->major(separator => '_');
my $subclass = $class . "::V_$maj";
if ($subclass->isa($class))
{
@@ -1270,9 +1278,10 @@ sub new
}
else
{
- carp "PostgreSQL::Test::Cluster isn't fully compatible with version $ver";
+ carp
+ "PostgreSQL::Test::Cluster isn't fully compatible with version $ver";
}
- }
+ }
# Add node to list of nodes
push(@all_nodes, $node);
@@ -1528,7 +1537,8 @@ END
next if defined $ENV{'PG_TEST_NOCLEAN'};
# clean basedir on clean test invocation
- $node->clean_node if $exit_code == 0 && PostgreSQL::Test::Utils::all_tests_passing();
+ $node->clean_node
+ if $exit_code == 0 && PostgreSQL::Test::Utils::all_tests_passing();
}
$? = $exit_code;
@@ -2178,7 +2188,8 @@ sub connect_ok
if (@log_like or @log_unlike)
{
- my $log_contents = PostgreSQL::Test::Utils::slurp_file($self->logfile, $log_location);
+ my $log_contents =
+ PostgreSQL::Test::Utils::slurp_file($self->logfile, $log_location);
while (my $regex = shift @log_like)
{
@@ -2248,7 +2259,8 @@ sub connect_fails
if (@log_like or @log_unlike)
{
- my $log_contents = PostgreSQL::Test::Utils::slurp_file($self->logfile, $log_location);
+ my $log_contents =
+ PostgreSQL::Test::Utils::slurp_file($self->logfile, $log_location);
while (my $regex = shift @log_like)
{
@@ -2444,7 +2456,8 @@ sub issues_sql_like
my $result = PostgreSQL::Test::Utils::run_log($cmd);
ok($result, "@$cmd exit code 0");
- my $log = PostgreSQL::Test::Utils::slurp_file($self->logfile, $log_location);
+ my $log =
+ PostgreSQL::Test::Utils::slurp_file($self->logfile, $log_location);
like($log, $expected_sql, "$test_name: SQL found in server log");
return;
}
@@ -2550,7 +2563,8 @@ sub wait_for_catchup
unless exists($valid_modes{$mode});
# Allow passing of a PostgreSQL::Test::Cluster instance as shorthand
- if (blessed($standby_name) && $standby_name->isa("PostgreSQL::Test::Cluster"))
+ if (blessed($standby_name)
+ && $standby_name->isa("PostgreSQL::Test::Cluster"))
{
$standby_name = $standby_name->name;
}
@@ -2566,8 +2580,7 @@ sub wait_for_catchup
. $self->name . "\n";
# Before release 12 walreceiver just set the application name to
# "walreceiver"
- my $query =
- qq[SELECT '$target_lsn' <= ${mode}_lsn AND state = 'streaming'
+ my $query = qq[SELECT '$target_lsn' <= ${mode}_lsn AND state = 'streaming'
FROM pg_catalog.pg_stat_replication
WHERE application_name IN ('$standby_name', 'walreceiver')];
$self->poll_query_until('postgres', $query)
@@ -2641,9 +2654,10 @@ sub wait_for_log
while ($attempts < $max_attempts)
{
- my $log = PostgreSQL::Test::Utils::slurp_file($self->logfile, $offset);
+ my $log =
+ PostgreSQL::Test::Utils::slurp_file($self->logfile, $offset);
- return $offset+length($log) if ($log =~ m/$regexp/);
+ return $offset + length($log) if ($log =~ m/$regexp/);
# Wait 0.1 second before retrying.
usleep(100_000);
@@ -2858,7 +2872,8 @@ sub corrupt_page_checksum
##########################################################################
-package PostgreSQL::Test::Cluster::V_11; ## no critic (ProhibitMultiplePackages)
+package PostgreSQL::Test::Cluster::V_11
+ ; ## no critic (ProhibitMultiplePackages)
# parent.pm is not present in all perl versions before 5.10.1, so instead
# do directly what it would do for this:
@@ -2874,21 +2889,22 @@ sub _recovery_file { return "recovery.conf"; }
sub set_standby_mode
{
- my $self = shift;
- $self->append_conf("recovery.conf", "standby_mode = on\n");
+ my $self = shift;
+ $self->append_conf("recovery.conf", "standby_mode = on\n");
}
sub init
{
- my ($self, %params) = @_;
- $self->SUPER::init(%params);
- $self->adjust_conf('postgresql.conf', 'max_wal_senders',
- $params{allows_streaming} ? 5 : 0);
+ my ($self, %params) = @_;
+ $self->SUPER::init(%params);
+ $self->adjust_conf('postgresql.conf', 'max_wal_senders',
+ $params{allows_streaming} ? 5 : 0);
}
##########################################################################
-package PostgreSQL::Test::Cluster::V_10; ## no critic (ProhibitMultiplePackages)
+package PostgreSQL::Test::Cluster::V_10
+ ; ## no critic (ProhibitMultiplePackages)
# use parent -norequire, qw(PostgreSQL::Test::Cluster::V_11);
push @PostgreSQL::Test::Cluster::V_10::ISA, 'PostgreSQL::Test::Cluster::V_11';
diff --git a/src/test/perl/PostgreSQL/Test/SimpleTee.pm b/src/test/perl/PostgreSQL/Test/SimpleTee.pm
index 7cb8591fed2..ec13714c331 100644
--- a/src/test/perl/PostgreSQL/Test/SimpleTee.pm
+++ b/src/test/perl/PostgreSQL/Test/SimpleTee.pm
@@ -27,13 +27,13 @@ BEGIN { $last_time = time; }
sub _time_str
{
- my $tm = time;
+ my $tm = time;
my $diff = $tm - $last_time;
$last_time = $tm;
my ($sec, $min, $hour) = localtime($tm);
my $msec = int(1000 * ($tm - int($tm)));
return sprintf("[%.2d:%.2d:%.2d.%.3d](%.3fs) ",
- $hour, $min, $sec, $msec, $diff);
+ $hour, $min, $sec, $msec, $diff);
}
sub TIEHANDLE
@@ -50,11 +50,11 @@ sub PRINT
# the original stdout, which is what PROVE sees. Additional decorations
# confuse it, so only put out the time string on files after the first.
my $skip = 1;
- my $ts = _time_str;
+ my $ts = _time_str;
for my $fh (@$self)
{
print $fh ($skip ? "" : $ts), @_ or $ok = 0;
- $fh->flush or $ok = 0;
+ $fh->flush or $ok = 0;
$skip = 0;
}
return $ok;
diff --git a/src/test/perl/PostgreSQL/Test/Utils.pm b/src/test/perl/PostgreSQL/Test/Utils.pm
index dca1b3b17c4..1ca2cc59170 100644
--- a/src/test/perl/PostgreSQL/Test/Utils.pm
+++ b/src/test/perl/PostgreSQL/Test/Utils.pm
@@ -142,14 +142,15 @@ BEGIN
# Must be set early
$windows_os = $Config{osname} eq 'MSWin32' || $Config{osname} eq 'msys';
# Check if this environment is MSYS2.
- $is_msys2 = $windows_os && -x '/usr/bin/uname' &&
- `uname -or` =~ /^[2-9].*Msys/;
+ $is_msys2 =
+ $windows_os
+ && -x '/usr/bin/uname'
+ && `uname -or` =~ /^[2-9].*Msys/;
if ($windows_os)
{
require Win32API::File;
- Win32API::File->import(
- qw(createFile OsFHandleOpen CloseHandle));
+ Win32API::File->import(qw(createFile OsFHandleOpen CloseHandle));
}
# Specifies whether to use Unix sockets for test setups. On
@@ -428,12 +429,16 @@ sub pump_until
last if $$stream =~ /$until/;
if ($timeout->is_expired)
{
- diag("pump_until: timeout expired when searching for \"$until\" with stream: \"$$stream\"");
+ diag(
+ "pump_until: timeout expired when searching for \"$until\" with stream: \"$$stream\""
+ );
return 0;
}
if (not $proc->pumpable())
{
- diag("pump_until: process terminated unexpectedly when searching for \"$until\" with stream: \"$$stream\"");
+ diag(
+ "pump_until: process terminated unexpectedly when searching for \"$until\" with stream: \"$$stream\""
+ );
return 0;
}
$proc->pump();
diff --git a/src/test/perl/PostgreSQL/Version.pm b/src/test/perl/PostgreSQL/Version.pm
index 30d328103b5..8f704911895 100644
--- a/src/test/perl/PostgreSQL/Version.pm
+++ b/src/test/perl/PostgreSQL/Version.pm
@@ -151,14 +151,14 @@ a dot unless the separator argument is given.
sub major
{
- my ($self, %params) = @_;
- my $result = $self->{num}->[0];
- if ($result + 0 < 10)
- {
- my $sep = $params{separator} || '.';
- $result .= "$sep$self->{num}->[1]";
- }
- return $result;
+ my ($self, %params) = @_;
+ my $result = $self->{num}->[0];
+ if ($result + 0 < 10)
+ {
+ my $sep = $params{separator} || '.';
+ $result .= "$sep$self->{num}->[1]";
+ }
+ return $result;
}
1;
diff --git a/src/test/recovery/t/001_stream_rep.pl b/src/test/recovery/t/001_stream_rep.pl
index 583ee87da82..86864098f9e 100644
--- a/src/test/recovery/t/001_stream_rep.pl
+++ b/src/test/recovery/t/001_stream_rep.pl
@@ -374,7 +374,8 @@ sub replay_check
);
my $primary_lsn = $node_primary->lsn('write');
$node_primary->wait_for_catchup($node_standby_1, 'replay', $primary_lsn);
- $node_standby_1->wait_for_catchup($node_standby_2, 'replay', $primary_lsn);
+ $node_standby_1->wait_for_catchup($node_standby_2, 'replay',
+ $primary_lsn);
$node_standby_1->safe_psql('postgres',
qq[SELECT 1 FROM replayed WHERE val = $newval])
diff --git a/src/test/recovery/t/002_archiving.pl b/src/test/recovery/t/002_archiving.pl
index 01c52d8e7f7..d69da4e5efd 100644
--- a/src/test/recovery/t/002_archiving.pl
+++ b/src/test/recovery/t/002_archiving.pl
@@ -125,7 +125,7 @@ my $log_location = -s $node_standby2->logfile;
$node_standby2->promote;
# Check the logs of the standby to see that the commands have failed.
-my $log_contents = slurp_file($node_standby2->logfile, $log_location);
+my $log_contents = slurp_file($node_standby2->logfile, $log_location);
my $node_standby2_data = $node_standby2->data_dir;
like(
diff --git a/src/test/recovery/t/006_logical_decoding.pl b/src/test/recovery/t/006_logical_decoding.pl
index 3ccced2ea24..0cd0467fbb8 100644
--- a/src/test/recovery/t/006_logical_decoding.pl
+++ b/src/test/recovery/t/006_logical_decoding.pl
@@ -206,62 +206,68 @@ my $stats_test_slot2 = 'logical_slot';
# Test that reset works for pg_stat_replication_slots
# Stats exist for stats test slot 1
-is($node_primary->safe_psql(
- 'postgres',
- qq(SELECT total_bytes > 0, stats_reset IS NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
-), qq(t|t), qq(Total bytes is > 0 and stats_reset is NULL for slot '$stats_test_slot1'.));
+is( $node_primary->safe_psql(
+ 'postgres',
+ qq(SELECT total_bytes > 0, stats_reset IS NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
+ ),
+ qq(t|t),
+ qq(Total bytes is > 0 and stats_reset is NULL for slot '$stats_test_slot1'.)
+);
# Do reset of stats for stats test slot 1
-$node_primary->safe_psql(
- 'postgres',
- qq(SELECT pg_stat_reset_replication_slot('$stats_test_slot1'))
-);
+$node_primary->safe_psql('postgres',
+ qq(SELECT pg_stat_reset_replication_slot('$stats_test_slot1')));
# Get reset value after reset
-my $reset1 = $node_primary->safe_psql(
- 'postgres',
+my $reset1 = $node_primary->safe_psql('postgres',
qq(SELECT stats_reset FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
);
# Do reset again
-$node_primary->safe_psql(
- 'postgres',
- qq(SELECT pg_stat_reset_replication_slot('$stats_test_slot1'))
-);
+$node_primary->safe_psql('postgres',
+ qq(SELECT pg_stat_reset_replication_slot('$stats_test_slot1')));
-is($node_primary->safe_psql(
- 'postgres',
- qq(SELECT stats_reset > '$reset1'::timestamptz, total_bytes = 0 FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
-), qq(t|t), qq(Check that reset timestamp is later after the second reset of stats for slot '$stats_test_slot1' and confirm total_bytes was set to 0.));
+is( $node_primary->safe_psql(
+ 'postgres',
+ qq(SELECT stats_reset > '$reset1'::timestamptz, total_bytes = 0 FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
+ ),
+ qq(t|t),
+ qq(Check that reset timestamp is later after the second reset of stats for slot '$stats_test_slot1' and confirm total_bytes was set to 0.)
+);
# Check that test slot 2 has NULL in reset timestamp
-is($node_primary->safe_psql(
- 'postgres',
- qq(SELECT stats_reset IS NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot2')
-), qq(t), qq(Stats_reset is NULL for slot '$stats_test_slot2' before reset.));
+is( $node_primary->safe_psql(
+ 'postgres',
+ qq(SELECT stats_reset IS NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot2')
+ ),
+ qq(t),
+ qq(Stats_reset is NULL for slot '$stats_test_slot2' before reset.));
# Get reset value again for test slot 1
-$reset1 = $node_primary->safe_psql(
- 'postgres',
+$reset1 = $node_primary->safe_psql('postgres',
qq(SELECT stats_reset FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
);
# Reset stats for all replication slots
-$node_primary->safe_psql(
- 'postgres',
- qq(SELECT pg_stat_reset_replication_slot(NULL))
-);
+$node_primary->safe_psql('postgres',
+ qq(SELECT pg_stat_reset_replication_slot(NULL)));
# Check that test slot 2 reset timestamp is no longer NULL after reset
-is($node_primary->safe_psql(
- 'postgres',
- qq(SELECT stats_reset IS NOT NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot2')
-), qq(t), qq(Stats_reset is not NULL for slot '$stats_test_slot2' after reset all.));
-
-is($node_primary->safe_psql(
- 'postgres',
- qq(SELECT stats_reset > '$reset1'::timestamptz FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
-), qq(t), qq(Check that reset timestamp is later after resetting stats for slot '$stats_test_slot1' again.));
+is( $node_primary->safe_psql(
+ 'postgres',
+ qq(SELECT stats_reset IS NOT NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot2')
+ ),
+ qq(t),
+ qq(Stats_reset is not NULL for slot '$stats_test_slot2' after reset all.)
+);
+
+is( $node_primary->safe_psql(
+ 'postgres',
+ qq(SELECT stats_reset > '$reset1'::timestamptz FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
+ ),
+ qq(t),
+ qq(Check that reset timestamp is later after resetting stats for slot '$stats_test_slot1' again.)
+);
# done with the node
$node_primary->stop;
diff --git a/src/test/recovery/t/013_crash_restart.pl b/src/test/recovery/t/013_crash_restart.pl
index 10da6cb0c1c..c22844d39c0 100644
--- a/src/test/recovery/t/013_crash_restart.pl
+++ b/src/test/recovery/t/013_crash_restart.pl
@@ -66,7 +66,8 @@ CREATE TABLE alive(status text);
INSERT INTO alive VALUES($$committed-before-sigquit$$);
SELECT pg_backend_pid();
];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
+ok( pump_until(
+ $killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
'acquired pid for SIGQUIT');
my $pid = $killme_stdout;
chomp($pid);
@@ -78,7 +79,9 @@ $killme_stdin .= q[
BEGIN;
INSERT INTO alive VALUES($$in-progress-before-sigquit$$) RETURNING status;
];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigquit/m),
+ok( pump_until(
+ $killme, $psql_timeout,
+ \$killme_stdout, qr/in-progress-before-sigquit/m),
'inserted in-progress-before-sigquit');
$killme_stdout = '';
$killme_stderr = '';
@@ -91,7 +94,8 @@ $monitor_stdin .= q[
SELECT $$psql-connected$$;
SELECT pg_sleep(3600);
];
-ok(pump_until($monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m),
+ok( pump_until(
+ $monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m),
'monitor connected');
$monitor_stdout = '';
$monitor_stderr = '';
@@ -145,7 +149,8 @@ $monitor->run();
$killme_stdin .= q[
SELECT pg_backend_pid();
];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
+ok( pump_until(
+ $killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
"acquired pid for SIGKILL");
$pid = $killme_stdout;
chomp($pid);
@@ -158,7 +163,9 @@ INSERT INTO alive VALUES($$committed-before-sigkill$$) RETURNING status;
BEGIN;
INSERT INTO alive VALUES($$in-progress-before-sigkill$$) RETURNING status;
];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigkill/m),
+ok( pump_until(
+ $killme, $psql_timeout,
+ \$killme_stdout, qr/in-progress-before-sigkill/m),
'inserted in-progress-before-sigkill');
$killme_stdout = '';
$killme_stderr = '';
@@ -170,7 +177,8 @@ $monitor_stdin .= q[
SELECT $$psql-connected$$;
SELECT pg_sleep(3600);
];
-ok(pump_until($monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m),
+ok( pump_until(
+ $monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m),
'monitor connected');
$monitor_stdout = '';
$monitor_stderr = '';
diff --git a/src/test/recovery/t/014_unlogged_reinit.pl b/src/test/recovery/t/014_unlogged_reinit.pl
index 0dca3f69fe3..72895104ed9 100644
--- a/src/test/recovery/t/014_unlogged_reinit.pl
+++ b/src/test/recovery/t/014_unlogged_reinit.pl
@@ -44,7 +44,8 @@ is($node->safe_psql('postgres', "SELECT nextval('seq_unlogged')"),
my $tablespaceDir = PostgreSQL::Test::Utils::tempdir;
-$node->safe_psql('postgres', "CREATE TABLESPACE ts1 LOCATION '$tablespaceDir'");
+$node->safe_psql('postgres',
+ "CREATE TABLESPACE ts1 LOCATION '$tablespaceDir'");
$node->safe_psql('postgres',
'CREATE UNLOGGED TABLE ts1_unlogged (id int) TABLESPACE ts1');
diff --git a/src/test/recovery/t/019_replslot_limit.pl b/src/test/recovery/t/019_replslot_limit.pl
index 5654f3b5455..6bbf55c3ee1 100644
--- a/src/test/recovery/t/019_replslot_limit.pl
+++ b/src/test/recovery/t/019_replslot_limit.pl
@@ -347,16 +347,18 @@ while (1)
my ($stdout, $stderr);
$senderpid = $node_primary3->safe_psql('postgres',
- "SELECT pid FROM pg_stat_activity WHERE backend_type = 'walsender'");
+ "SELECT pid FROM pg_stat_activity WHERE backend_type = 'walsender'");
last if $senderpid =~ qr/^[0-9]+$/;
diag "multiple walsenders active in iteration $i";
# show information about all active connections
- $node_primary3->psql('postgres',
- "\\a\\t\nSELECT * FROM pg_stat_activity",
- stdout => \$stdout, stderr => \$stderr);
+ $node_primary3->psql(
+ 'postgres',
+ "\\a\\t\nSELECT * FROM pg_stat_activity",
+ stdout => \$stdout,
+ stderr => \$stderr);
diag $stdout, $stderr;
 	# unlikely that the problem would resolve after 15s, so give up at this point
diff --git a/src/test/recovery/t/022_crash_temp_files.pl b/src/test/recovery/t/022_crash_temp_files.pl
index 24fb141785d..53a55c7a8ac 100644
--- a/src/test/recovery/t/022_crash_temp_files.pl
+++ b/src/test/recovery/t/022_crash_temp_files.pl
@@ -53,7 +53,8 @@ my $killme = IPC::Run::start(
$killme_stdin .= q[
SELECT pg_backend_pid();
];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
+ok( pump_until(
+ $killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
'acquired pid for SIGKILL');
my $pid = $killme_stdout;
chomp($pid);
@@ -82,7 +83,8 @@ BEGIN;
INSERT INTO tab_crash (a) VALUES(1);
SELECT $$insert-tuple-to-lock-next-insert$$;
];
-pump_until($killme2, $psql_timeout, \$killme_stdout2, qr/insert-tuple-to-lock-next-insert/m);
+pump_until($killme2, $psql_timeout, \$killme_stdout2,
+ qr/insert-tuple-to-lock-next-insert/m);
$killme_stdout2 = '';
$killme_stderr2 = '';
@@ -95,7 +97,9 @@ BEGIN;
SELECT $$in-progress-before-sigkill$$;
INSERT INTO tab_crash (a) SELECT i FROM generate_series(1, 5000) s(i);
];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigkill/m),
+ok( pump_until(
+ $killme, $psql_timeout,
+ \$killme_stdout, qr/in-progress-before-sigkill/m),
'insert in-progress-before-sigkill');
$killme_stdout = '';
$killme_stderr = '';
@@ -117,7 +121,8 @@ END; $c$;
SELECT $$insert-tuple-lock-waiting$$;
];
-pump_until($killme2, $psql_timeout, \$killme_stdout2, qr/insert-tuple-lock-waiting/m);
+pump_until($killme2, $psql_timeout, \$killme_stdout2,
+ qr/insert-tuple-lock-waiting/m);
$killme_stdout2 = '';
$killme_stderr2 = '';
@@ -167,7 +172,8 @@ $killme->run();
$killme_stdin .= q[
SELECT pg_backend_pid();
];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
+ok( pump_until(
+ $killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
'acquired pid for SIGKILL');
$pid = $killme_stdout;
chomp($pid);
@@ -184,7 +190,8 @@ BEGIN;
INSERT INTO tab_crash (a) VALUES(1);
SELECT $$insert-tuple-to-lock-next-insert$$;
];
-pump_until($killme2, $psql_timeout, \$killme_stdout2, qr/insert-tuple-to-lock-next-insert/m);
+pump_until($killme2, $psql_timeout, \$killme_stdout2,
+ qr/insert-tuple-to-lock-next-insert/m);
$killme_stdout2 = '';
$killme_stderr2 = '';
@@ -197,7 +204,9 @@ BEGIN;
SELECT $$in-progress-before-sigkill$$;
INSERT INTO tab_crash (a) SELECT i FROM generate_series(1, 5000) s(i);
];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigkill/m),
+ok( pump_until(
+ $killme, $psql_timeout,
+ \$killme_stdout, qr/in-progress-before-sigkill/m),
'insert in-progress-before-sigkill');
$killme_stdout = '';
$killme_stderr = '';
@@ -219,7 +228,8 @@ END; $c$;
SELECT $$insert-tuple-lock-waiting$$;
];
-pump_until($killme2, $psql_timeout, \$killme_stdout2, qr/insert-tuple-lock-waiting/m);
+pump_until($killme2, $psql_timeout, \$killme_stdout2,
+ qr/insert-tuple-lock-waiting/m);
$killme_stdout2 = '';
$killme_stderr2 = '';
diff --git a/src/test/recovery/t/027_stream_regress.pl b/src/test/recovery/t/027_stream_regress.pl
index be9799c0a46..fdb4ea0bf50 100644
--- a/src/test/recovery/t/027_stream_regress.pl
+++ b/src/test/recovery/t/027_stream_regress.pl
@@ -19,7 +19,8 @@ $node_primary->init(allows_streaming => 1);
# Increase some settings that Cluster->new makes too low by default.
$node_primary->adjust_conf('postgresql.conf', 'max_connections', '25');
-$node_primary->append_conf('postgresql.conf', 'max_prepared_transactions = 10');
+$node_primary->append_conf('postgresql.conf',
+ 'max_prepared_transactions = 10');
# We'll stick with Cluster->new's small default shared_buffers, but since that
# makes synchronized seqscans more probable, it risks changing the results of
# some test queries. Disable synchronized seqscans to prevent that.
@@ -27,18 +28,19 @@ $node_primary->append_conf('postgresql.conf', 'synchronize_seqscans = off');
# WAL consistency checking is resource intensive so require opt-in with the
# PG_TEST_EXTRA environment variable.
-if ($ENV{PG_TEST_EXTRA} &&
- $ENV{PG_TEST_EXTRA} =~ m/\bwal_consistency_checking\b/) {
+if ( $ENV{PG_TEST_EXTRA}
+ && $ENV{PG_TEST_EXTRA} =~ m/\bwal_consistency_checking\b/)
+{
$node_primary->append_conf('postgresql.conf',
'wal_consistency_checking = all');
}
$node_primary->start;
is( $node_primary->psql(
- 'postgres',
- qq[SELECT pg_create_physical_replication_slot('standby_1');]),
- 0,
- 'physical slot created on primary');
+ 'postgres',
+ qq[SELECT pg_create_physical_replication_slot('standby_1');]),
+ 0,
+ 'physical slot created on primary');
my $backup_name = 'my_backup';
# Take backup
@@ -49,25 +51,29 @@ my $node_standby_1 = PostgreSQL::Test::Cluster->new('standby_1');
$node_standby_1->init_from_backup($node_primary, $backup_name,
has_streaming => 1);
$node_standby_1->append_conf('postgresql.conf',
- "primary_slot_name = standby_1");
+ "primary_slot_name = standby_1");
$node_standby_1->append_conf('postgresql.conf',
'max_standby_streaming_delay = 600s');
$node_standby_1->start;
-my $dlpath = dirname($ENV{REGRESS_SHLIB});
+my $dlpath = dirname($ENV{REGRESS_SHLIB});
my $outputdir = $PostgreSQL::Test::Utils::tmp_check;
# Run the regression tests against the primary.
my $extra_opts = $ENV{EXTRA_REGRESS_OPTS} || "";
-my $rc = system($ENV{PG_REGRESS} . " $extra_opts " .
- "--dlpath=\"$dlpath\" " .
- "--bindir= " .
- "--host=" . $node_primary->host . " " .
- "--port=" . $node_primary->port . " " .
- "--schedule=../regress/parallel_schedule " .
- "--max-concurrent-tests=20 " .
- "--inputdir=../regress " .
- "--outputdir=\"$outputdir\"");
+my $rc =
+ system($ENV{PG_REGRESS}
+ . " $extra_opts "
+ . "--dlpath=\"$dlpath\" "
+ . "--bindir= "
+ . "--host="
+ . $node_primary->host . " "
+ . "--port="
+ . $node_primary->port . " "
+ . "--schedule=../regress/parallel_schedule "
+ . "--max-concurrent-tests=20 "
+ . "--inputdir=../regress "
+ . "--outputdir=\"$outputdir\"");
if ($rc != 0)
{
# Dump out the regression diffs file, if there is one
@@ -92,12 +98,16 @@ $node_primary->wait_for_catchup($node_standby_1, 'replay',
# Perform a logical dump of primary and standby, and check that they match
command_ok(
- [ 'pg_dumpall', '-f', $outputdir . '/primary.dump', '--no-sync',
- '-p', $node_primary->port ],
+ [
+ 'pg_dumpall', '-f', $outputdir . '/primary.dump',
+ '--no-sync', '-p', $node_primary->port
+ ],
'dump primary server');
command_ok(
- [ 'pg_dumpall', '-f', $outputdir . '/standby.dump', '--no-sync',
- '-p', $node_standby_1->port ],
+ [
+ 'pg_dumpall', '-f', $outputdir . '/standby.dump',
+ '--no-sync', '-p', $node_standby_1->port
+ ],
'dump standby server');
command_ok(
[ 'diff', $outputdir . '/primary.dump', $outputdir . '/standby.dump' ],
diff --git a/src/test/recovery/t/029_stats_restart.pl b/src/test/recovery/t/029_stats_restart.pl
index 2fe8db88079..1bf7b568ccb 100644
--- a/src/test/recovery/t/029_stats_restart.pl
+++ b/src/test/recovery/t/029_stats_restart.pl
@@ -273,7 +273,8 @@ $sect = "post immediate restart";
my $wal_restart_immediate = wal_stats();
cmp_ok(
- $wal_reset_restart->{reset}, 'lt',
+ $wal_reset_restart->{reset},
+ 'lt',
$wal_restart_immediate->{reset},
"$sect: reset timestamp is new");
diff --git a/src/test/recovery/t/031_recovery_conflict.pl b/src/test/recovery/t/031_recovery_conflict.pl
index 8dcb3da0de9..545d523edff 100644
--- a/src/test/recovery/t/031_recovery_conflict.pl
+++ b/src/test/recovery/t/031_recovery_conflict.pl
@@ -229,8 +229,10 @@ $expected_conflicts++;
# Want to test recovery deadlock conflicts, not buffer pin conflicts. Without
# changing max_standby_streaming_delay it'd be timing dependent what we hit
# first
-$node_standby->adjust_conf('postgresql.conf', 'max_standby_streaming_delay',
- "${PostgreSQL::Test::Utils::timeout_default}s");
+$node_standby->adjust_conf(
+ 'postgresql.conf',
+ 'max_standby_streaming_delay',
+ "${PostgreSQL::Test::Utils::timeout_default}s");
$node_standby->restart();
reconnect_and_clear();
@@ -289,7 +291,8 @@ check_conflict_stat("deadlock");
# clean up for next tests
$node_primary->safe_psql($test_db, qq[ROLLBACK PREPARED 'lock';]);
-$node_standby->adjust_conf('postgresql.conf', 'max_standby_streaming_delay', '50ms');
+$node_standby->adjust_conf('postgresql.conf', 'max_standby_streaming_delay',
+ '50ms');
$node_standby->restart();
reconnect_and_clear();
diff --git a/src/test/recovery/t/032_relfilenode_reuse.pl b/src/test/recovery/t/032_relfilenode_reuse.pl
index ac9340b7dd1..ae7e32763fb 100644
--- a/src/test/recovery/t/032_relfilenode_reuse.pl
+++ b/src/test/recovery/t/032_relfilenode_reuse.pl
@@ -8,7 +8,8 @@ use File::Basename;
my $node_primary = PostgreSQL::Test::Cluster->new('primary');
$node_primary->init(allows_streaming => 1);
-$node_primary->append_conf('postgresql.conf', q[
+$node_primary->append_conf(
+ 'postgresql.conf', q[
allow_in_place_tablespaces = true
log_connections=on
# to avoid "repairing" corruption
@@ -61,28 +62,28 @@ $psql_standby{run} = IPC::Run::start(
# rows. Using a template database + preexisting rows makes it a bit easier to
# reproduce, because there's no cache invalidations generated.
-$node_primary->safe_psql('postgres', "CREATE DATABASE conflict_db_template OID = 50000;");
-$node_primary->safe_psql('conflict_db_template', q[
+$node_primary->safe_psql('postgres',
+ "CREATE DATABASE conflict_db_template OID = 50000;");
+$node_primary->safe_psql(
+ 'conflict_db_template', q[
CREATE TABLE large(id serial primary key, dataa text, datab text);
- INSERT INTO large(dataa, datab) SELECT g.i::text, 1 FROM generate_series(1, 4000) g(i);]);
-$node_primary->safe_psql('postgres', "CREATE DATABASE conflict_db TEMPLATE conflict_db_template OID = 50001;");
+ INSERT INTO large(dataa, datab) SELECT g.i::text, 1 FROM generate_series(1, 4000) g(i);]
+);
+$node_primary->safe_psql('postgres',
+ "CREATE DATABASE conflict_db TEMPLATE conflict_db_template OID = 50001;");
-$node_primary->safe_psql('postgres', q[
+$node_primary->safe_psql(
+ 'postgres', q[
CREATE EXTENSION pg_prewarm;
CREATE TABLE replace_sb(data text);
- INSERT INTO replace_sb(data) SELECT random()::text FROM generate_series(1, 15000);]);
+ INSERT INTO replace_sb(data) SELECT random()::text FROM generate_series(1, 15000);]
+);
$node_primary->wait_for_catchup($node_standby);
# Use longrunning transactions, so that AtEOXact_SMgr doesn't close files
-send_query_and_wait(
- \%psql_primary,
- q[BEGIN;],
- qr/BEGIN/m);
-send_query_and_wait(
- \%psql_standby,
- q[BEGIN;],
- qr/BEGIN/m);
+send_query_and_wait(\%psql_primary, q[BEGIN;], qr/BEGIN/m);
+send_query_and_wait(\%psql_standby, q[BEGIN;], qr/BEGIN/m);
# Cause lots of dirty rows in shared_buffers
$node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 1;");
@@ -94,10 +95,10 @@ cause_eviction(\%psql_primary, \%psql_standby);
# drop and recreate database
$node_primary->safe_psql('postgres', "DROP DATABASE conflict_db;");
-$node_primary->safe_psql('postgres', "CREATE DATABASE conflict_db TEMPLATE conflict_db_template OID = 50001;");
+$node_primary->safe_psql('postgres',
+ "CREATE DATABASE conflict_db TEMPLATE conflict_db_template OID = 50001;");
-verify($node_primary, $node_standby, 1,
- "initial contents as expected");
+verify($node_primary, $node_standby, 1, "initial contents as expected");
# Again cause lots of dirty rows in shared_buffers, but use a different update
# value so we can check everything is OK
@@ -109,17 +110,17 @@ $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 2;");
cause_eviction(\%psql_primary, \%psql_standby);
verify($node_primary, $node_standby, 2,
- "update to reused relfilenode (due to DB oid conflict) is not lost");
+ "update to reused relfilenode (due to DB oid conflict) is not lost");
$node_primary->safe_psql('conflict_db', "VACUUM FULL large;");
$node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 3;");
-verify($node_primary, $node_standby, 3,
- "restored contents as expected");
+verify($node_primary, $node_standby, 3, "restored contents as expected");
# Test for old filehandles after moving a database in / out of tablespace
-$node_primary->safe_psql('postgres', q[CREATE TABLESPACE test_tablespace LOCATION '']);
+$node_primary->safe_psql('postgres',
+ q[CREATE TABLESPACE test_tablespace LOCATION '']);
# cause dirty buffers
$node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 4;");
@@ -127,23 +128,25 @@ $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 4;");
cause_eviction(\%psql_primary, \%psql_standby);
# move database back / forth
-$node_primary->safe_psql('postgres', 'ALTER DATABASE conflict_db SET TABLESPACE test_tablespace');
-$node_primary->safe_psql('postgres', 'ALTER DATABASE conflict_db SET TABLESPACE pg_default');
+$node_primary->safe_psql('postgres',
+ 'ALTER DATABASE conflict_db SET TABLESPACE test_tablespace');
+$node_primary->safe_psql('postgres',
+ 'ALTER DATABASE conflict_db SET TABLESPACE pg_default');
# cause dirty buffers
$node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 5;");
cause_eviction(\%psql_primary, \%psql_standby);
-verify($node_primary, $node_standby, 5,
- "post move contents as expected");
+verify($node_primary, $node_standby, 5, "post move contents as expected");
-$node_primary->safe_psql('postgres', 'ALTER DATABASE conflict_db SET TABLESPACE test_tablespace');
+$node_primary->safe_psql('postgres',
+ 'ALTER DATABASE conflict_db SET TABLESPACE test_tablespace');
$node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 7;");
cause_eviction(\%psql_primary, \%psql_standby);
$node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 8;");
-$node_primary->safe_psql('postgres', 'DROP DATABASE conflict_db');
-$node_primary->safe_psql('postgres', 'DROP TABLESPACE test_tablespace');
+$node_primary->safe_psql('postgres', 'DROP DATABASE conflict_db');
+$node_primary->safe_psql('postgres', 'DROP TABLESPACE test_tablespace');
$node_primary->safe_psql('postgres', 'REINDEX TABLE pg_database');
@@ -160,25 +163,28 @@ $node_standby->stop();
# Make sure that there weren't crashes during shutdown
-command_like([ 'pg_controldata', $node_primary->data_dir ],
- qr/Database cluster state:\s+shut down\n/, 'primary shut down ok');
-command_like([ 'pg_controldata', $node_standby->data_dir ],
- qr/Database cluster state:\s+shut down in recovery\n/, 'standby shut down ok');
+command_like(
+ [ 'pg_controldata', $node_primary->data_dir ],
+ qr/Database cluster state:\s+shut down\n/,
+ 'primary shut down ok');
+command_like(
+ [ 'pg_controldata', $node_standby->data_dir ],
+ qr/Database cluster state:\s+shut down in recovery\n/,
+ 'standby shut down ok');
done_testing();
sub verify
{
my ($primary, $standby, $counter, $message) = @_;
- my $query = "SELECT datab, count(*) FROM large GROUP BY 1 ORDER BY 1 LIMIT 10";
+ my $query =
+ "SELECT datab, count(*) FROM large GROUP BY 1 ORDER BY 1 LIMIT 10";
is($primary->safe_psql('conflict_db', $query),
- "$counter|4000",
- "primary: $message");
+ "$counter|4000", "primary: $message");
$primary->wait_for_catchup($standby);
is($standby->safe_psql('conflict_db', $query),
- "$counter|4000",
- "standby: $message");
+ "$counter|4000", "standby: $message");
}
sub cause_eviction
diff --git a/src/test/regress/regress.c b/src/test/regress/regress.c
index ade4b51fb8d..ba3532a51e8 100644
--- a/src/test/regress/regress.c
+++ b/src/test/regress/regress.c
@@ -1226,8 +1226,8 @@ PG_FUNCTION_INFO_V1(get_columns_length);
Datum
get_columns_length(PG_FUNCTION_ARGS)
{
- ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
- Oid *type_oids;
+ ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
+ Oid *type_oids;
int ntypes;
int column_offset = 0;
@@ -1241,7 +1241,7 @@ get_columns_length(PG_FUNCTION_ARGS)
ntypes = ArrayGetNItems(ARR_NDIM(ta), ARR_DIMS(ta));
for (int i = 0; i < ntypes; i++)
{
- Oid typeoid = type_oids[i];
+ Oid typeoid = type_oids[i];
int16 typlen;
bool typbyval;
char typalign;
diff --git a/src/test/ssl/t/001_ssltests.pl b/src/test/ssl/t/001_ssltests.pl
index 58d2bc336f5..c0b4a5739ce 100644
--- a/src/test/ssl/t/001_ssltests.pl
+++ b/src/test/ssl/t/001_ssltests.pl
@@ -19,10 +19,12 @@ if ($ENV{with_ssl} ne 'openssl')
}
my $ssl_server = SSL::Server->new();
+
sub sslkey
{
return $ssl_server->sslkey(@_);
}
+
sub switch_server_cert
{
$ssl_server->switch_server_cert(@_);
@@ -56,28 +58,30 @@ my $result = $node->safe_psql('postgres', "SHOW ssl_library");
is($result, $ssl_server->ssl_library(), 'ssl_library parameter');
$ssl_server->configure_test_server_for_ssl($node, $SERVERHOSTADDR,
- $SERVERHOSTCIDR, 'trust');
+ $SERVERHOSTCIDR, 'trust');
note "testing password-protected keys";
-switch_server_cert($node,
- certfile => 'server-cn-only',
- cafile => 'root+client_ca',
- keyfile => 'server-password',
+switch_server_cert(
+ $node,
+ certfile => 'server-cn-only',
+ cafile => 'root+client_ca',
+ keyfile => 'server-password',
passphrase_cmd => 'echo wrongpassword',
- restart => 'no' );
+ restart => 'no');
command_fails(
[ 'pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart' ],
'restart fails with password-protected key file with wrong password');
$node->_update_pid(0);
-switch_server_cert($node,
- certfile => 'server-cn-only',
- cafile => 'root+client_ca',
- keyfile => 'server-password',
+switch_server_cert(
+ $node,
+ certfile => 'server-cn-only',
+ cafile => 'root+client_ca',
+ keyfile => 'server-password',
passphrase_cmd => 'echo secret1',
- restart => 'no');
+ restart => 'no');
command_ok(
[ 'pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart' ],
@@ -115,7 +119,8 @@ switch_server_cert($node, certfile => 'server-cn-only');
# Set of default settings for SSL parameters in connection string. This
# makes the tests protected against any defaults the environment may have
# in ~/.postgresql/.
-my $default_ssl_connstr = "sslkey=invalid sslcert=invalid sslrootcert=invalid sslcrl=invalid sslcrldir=invalid";
+my $default_ssl_connstr =
+ "sslkey=invalid sslcert=invalid sslrootcert=invalid sslcrl=invalid sslcrldir=invalid";
$common_connstr =
"$default_ssl_connstr user=ssltestuser dbname=trustdb hostaddr=$SERVERHOSTADDR host=common-name.pg-ssltest.test";
@@ -416,9 +421,11 @@ switch_server_cert($node, certfile => 'server-ip-cn-and-dns-alt-names');
$node->connect_ok("$common_connstr host=192.0.2.1",
"certificate with both an IP CN and DNS SANs matches CN");
-$node->connect_ok("$common_connstr host=dns1.alt-name.pg-ssltest.test",
+$node->connect_ok(
+ "$common_connstr host=dns1.alt-name.pg-ssltest.test",
"certificate with both an IP CN and DNS SANs matches SAN 1");
-$node->connect_ok("$common_connstr host=dns2.alt-name.pg-ssltest.test",
+$node->connect_ok(
+ "$common_connstr host=dns2.alt-name.pg-ssltest.test",
"certificate with both an IP CN and DNS SANs matches SAN 2");
# Finally, test a server certificate that has no CN or SANs. Of course, that's
@@ -506,42 +513,50 @@ $node->connect_fails(
# correct client cert in unencrypted PEM
$node->connect_ok(
- "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client.key'),
+ "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+ . sslkey('client.key'),
"certificate authorization succeeds with correct client cert in PEM format"
);
# correct client cert in unencrypted DER
$node->connect_ok(
- "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client-der.key'),
+ "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+ . sslkey('client-der.key'),
"certificate authorization succeeds with correct client cert in DER format"
);
# correct client cert in encrypted PEM
$node->connect_ok(
- "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client-encrypted-pem.key') . " sslpassword='dUmmyP^#+'",
+ "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+ . sslkey('client-encrypted-pem.key')
+ . " sslpassword='dUmmyP^#+'",
"certificate authorization succeeds with correct client cert in encrypted PEM format"
);
# correct client cert in encrypted DER
$node->connect_ok(
- "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client-encrypted-der.key') . " sslpassword='dUmmyP^#+'",
+ "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+ . sslkey('client-encrypted-der.key')
+ . " sslpassword='dUmmyP^#+'",
"certificate authorization succeeds with correct client cert in encrypted DER format"
);
# correct client cert in encrypted PEM with wrong password
$node->connect_fails(
- "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client-encrypted-pem.key') . " sslpassword='wrong'",
+ "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+ . sslkey('client-encrypted-pem.key')
+ . " sslpassword='wrong'",
"certificate authorization fails with correct client cert and wrong password in encrypted PEM format",
expected_stderr =>
- qr!private key file \".*client-encrypted-pem\.key\": bad decrypt!,
-);
+ qr!private key file \".*client-encrypted-pem\.key\": bad decrypt!,);
# correct client cert using whole DN
my $dn_connstr = "$common_connstr dbname=certdb_dn";
$node->connect_ok(
- "$dn_connstr user=ssltestuser sslcert=ssl/client-dn.crt " . sslkey('client-dn.key'),
+ "$dn_connstr user=ssltestuser sslcert=ssl/client-dn.crt "
+ . sslkey('client-dn.key'),
"certificate authorization succeeds with DN mapping",
log_like => [
qr/connection authenticated: identity="CN=ssltestuser-dn,OU=Testing,OU=Engineering,O=PGDG" method=cert/
@@ -551,14 +566,16 @@ $node->connect_ok(
$dn_connstr = "$common_connstr dbname=certdb_dn_re";
$node->connect_ok(
- "$dn_connstr user=ssltestuser sslcert=ssl/client-dn.crt " . sslkey('client-dn.key'),
+ "$dn_connstr user=ssltestuser sslcert=ssl/client-dn.crt "
+ . sslkey('client-dn.key'),
"certificate authorization succeeds with DN regex mapping");
# same thing but using explicit CN
$dn_connstr = "$common_connstr dbname=certdb_cn";
$node->connect_ok(
- "$dn_connstr user=ssltestuser sslcert=ssl/client-dn.crt " . sslkey('client-dn.key'),
+ "$dn_connstr user=ssltestuser sslcert=ssl/client-dn.crt "
+ . sslkey('client-dn.key'),
"certificate authorization succeeds with CN mapping",
# the full DN should still be used as the authenticated identity
log_like => [
@@ -576,7 +593,9 @@ TODO:
# correct client cert in encrypted PEM with empty password
$node->connect_fails(
- "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client-encrypted-pem.key') . " sslpassword=''",
+ "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+ . sslkey('client-encrypted-pem.key')
+ . " sslpassword=''",
"certificate authorization fails with correct client cert and empty password in encrypted PEM format",
expected_stderr =>
qr!private key file \".*client-encrypted-pem\.key\": processing error!
@@ -584,7 +603,8 @@ TODO:
# correct client cert in encrypted PEM with no password
$node->connect_fails(
- "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client-encrypted-pem.key'),
+ "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+ . sslkey('client-encrypted-pem.key'),
"certificate authorization fails with correct client cert and no password in encrypted PEM format",
expected_stderr =>
qr!private key file \".*client-encrypted-pem\.key\": processing error!
@@ -630,7 +650,8 @@ command_like(
'-P',
'null=_null_',
'-d',
- "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client.key'),
+ "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+ . sslkey('client.key'),
'-c',
"SELECT * FROM pg_stat_ssl WHERE pid = pg_backend_pid()"
],
@@ -644,7 +665,8 @@ SKIP:
skip "Permissions check not enforced on Windows", 2 if ($windows_os);
$node->connect_fails(
- "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client_wrongperms.key'),
+ "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+ . sslkey('client_wrongperms.key'),
"certificate authorization fails because of file permissions",
expected_stderr =>
qr!private key file \".*client_wrongperms\.key\" has group or world access!
@@ -653,7 +675,8 @@ SKIP:
# client cert belonging to another user
$node->connect_fails(
- "$common_connstr user=anotheruser sslcert=ssl/client.crt " . sslkey('client.key'),
+ "$common_connstr user=anotheruser sslcert=ssl/client.crt "
+ . sslkey('client.key'),
"certificate authorization fails with client cert belonging to another user",
expected_stderr =>
qr/certificate authentication failed for user "anotheruser"/,
@@ -663,7 +686,8 @@ $node->connect_fails(
# revoked client cert
$node->connect_fails(
- "$common_connstr user=ssltestuser sslcert=ssl/client-revoked.crt " . sslkey('client-revoked.key'),
+ "$common_connstr user=ssltestuser sslcert=ssl/client-revoked.crt "
+ . sslkey('client-revoked.key'),
"certificate authorization fails with revoked client cert",
expected_stderr => qr/SSL error: sslv3 alert certificate revoked/,
# revoked certificates should not authenticate the user
@@ -676,13 +700,15 @@ $common_connstr =
"$default_ssl_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require dbname=verifydb hostaddr=$SERVERHOSTADDR host=localhost";
$node->connect_ok(
- "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client.key'),
+ "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+ . sslkey('client.key'),
"auth_option clientcert=verify-full succeeds with matching username and Common Name",
# verify-full does not provide authentication
log_unlike => [qr/connection authenticated:/],);
$node->connect_fails(
- "$common_connstr user=anotheruser sslcert=ssl/client.crt " . sslkey('client.key'),
+ "$common_connstr user=anotheruser sslcert=ssl/client.crt "
+ . sslkey('client.key'),
"auth_option clientcert=verify-full fails with mismatching username and Common Name",
expected_stderr =>
qr/FATAL: .* "trust" authentication failed for user "anotheruser"/,
@@ -692,7 +718,8 @@ $node->connect_fails(
# Check that connecting with auth-option verify-ca in pg_hba :
# works, when username doesn't match Common Name
$node->connect_ok(
- "$common_connstr user=yetanotheruser sslcert=ssl/client.crt " . sslkey('client.key'),
+ "$common_connstr user=yetanotheruser sslcert=ssl/client.crt "
+ . sslkey('client.key'),
"auth_option clientcert=verify-ca succeeds with mismatching username and Common Name",
# verify-full does not provide authentication
log_unlike => [qr/connection authenticated:/],);
@@ -700,7 +727,9 @@ $node->connect_ok(
# intermediate client_ca.crt is provided by client, and isn't in server's ssl_ca_file
switch_server_cert($node, certfile => 'server-cn-only', cafile => 'root_ca');
$common_connstr =
- "$default_ssl_connstr user=ssltestuser dbname=certdb " . sslkey('client.key') . " sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR host=localhost";
+ "$default_ssl_connstr user=ssltestuser dbname=certdb "
+ . sslkey('client.key')
+ . " sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR host=localhost";
$node->connect_ok(
"$common_connstr sslmode=require sslcert=ssl/client+client_ca.crt",
@@ -711,11 +740,15 @@ $node->connect_fails(
expected_stderr => qr/SSL error: tlsv1 alert unknown ca/);
# test server-side CRL directory
-switch_server_cert($node, certfile => 'server-cn-only', crldir => 'root+client-crldir');
+switch_server_cert(
+ $node,
+ certfile => 'server-cn-only',
+ crldir => 'root+client-crldir');
# revoked client cert
$node->connect_fails(
- "$common_connstr user=ssltestuser sslcert=ssl/client-revoked.crt " . sslkey('client-revoked.key'),
+ "$common_connstr user=ssltestuser sslcert=ssl/client-revoked.crt "
+ . sslkey('client-revoked.key'),
"certificate authorization fails with revoked client cert with server-side CRL directory",
expected_stderr => qr/SSL error: sslv3 alert certificate revoked/);
diff --git a/src/test/ssl/t/002_scram.pl b/src/test/ssl/t/002_scram.pl
index 4354901f539..588f47a39b9 100644
--- a/src/test/ssl/t/002_scram.pl
+++ b/src/test/ssl/t/002_scram.pl
@@ -22,10 +22,12 @@ if ($ENV{with_ssl} ne 'openssl')
}
my $ssl_server = SSL::Server->new();
+
sub sslkey
{
return $ssl_server->sslkey(@_);
}
+
sub switch_server_cert
{
$ssl_server->switch_server_cert(@_);
@@ -57,8 +59,11 @@ $ENV{PGPORT} = $node->port;
$node->start;
# Configure server for SSL connections, with password handling.
-$ssl_server->configure_test_server_for_ssl($node, $SERVERHOSTADDR, $SERVERHOSTCIDR,
- "scram-sha-256", 'password' => "pass", 'password_enc' => "scram-sha-256");
+$ssl_server->configure_test_server_for_ssl(
+ $node, $SERVERHOSTADDR, $SERVERHOSTCIDR,
+ "scram-sha-256",
+ 'password' => "pass",
+ 'password_enc' => "scram-sha-256");
switch_server_cert($node, certfile => 'server-cn-only');
$ENV{PGPASSWORD} = "pass";
$common_connstr =
@@ -104,7 +109,7 @@ $node->connect_fails(
# because channel binding is not performed. Note that ssl/client.key may
# be used in a different test, so the name of this temporary client key
# is chosen here to be unique.
-my $cert_tempdir = PostgreSQL::Test::Utils::tempdir();
+my $cert_tempdir = PostgreSQL::Test::Utils::tempdir();
my $client_tmp_key = "$cert_tempdir/client_scram.key";
copy("ssl/client.key", "$cert_tempdir/client_scram.key")
or die
diff --git a/src/test/ssl/t/003_sslinfo.pl b/src/test/ssl/t/003_sslinfo.pl
index 96a5db86721..87fb18a31e0 100644
--- a/src/test/ssl/t/003_sslinfo.pl
+++ b/src/test/ssl/t/003_sslinfo.pl
@@ -21,10 +21,12 @@ if ($ENV{with_ssl} ne 'openssl')
#### Some configuration
my $ssl_server = SSL::Server->new();
+
sub sslkey
{
return $ssl_server->sslkey(@_);
}
+
sub switch_server_cert
{
$ssl_server->switch_server_cert(@_);
@@ -52,8 +54,8 @@ $ENV{PGHOST} = $node->host;
$ENV{PGPORT} = $node->port;
$node->start;
-$ssl_server->configure_test_server_for_ssl($node, $SERVERHOSTADDR, $SERVERHOSTCIDR,
- 'trust', extensions => [ qw(sslinfo) ]);
+$ssl_server->configure_test_server_for_ssl($node, $SERVERHOSTADDR,
+ $SERVERHOSTCIDR, 'trust', extensions => [qw(sslinfo)]);
# We aren't using any CRL's in this suite so we can keep using server-revoked
# as server certificate for simple client.crt connection much like how the
@@ -63,11 +65,13 @@ switch_server_cert($node, certfile => 'server-revoked');
# Set of default settings for SSL parameters in connection string. This
# makes the tests protected against any defaults the environment may have
# in ~/.postgresql/.
-my $default_ssl_connstr = "sslkey=invalid sslcert=invalid sslrootcert=invalid sslcrl=invalid sslcrldir=invalid";
+my $default_ssl_connstr =
+ "sslkey=invalid sslcert=invalid sslrootcert=invalid sslcrl=invalid sslcrldir=invalid";
$common_connstr =
- "$default_ssl_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require dbname=certdb hostaddr=$SERVERHOSTADDR host=localhost " .
- "user=ssltestuser sslcert=ssl/client_ext.crt " . sslkey('client_ext.key');
+ "$default_ssl_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require dbname=certdb hostaddr=$SERVERHOSTADDR host=localhost "
+ . "user=ssltestuser sslcert=ssl/client_ext.crt "
+ . sslkey('client_ext.key');
# Make sure we can connect even though previous test suites have established this
$node->connect_ok(
@@ -77,62 +81,85 @@ $node->connect_ok(
my $result;
-$result = $node->safe_psql("certdb", "SELECT ssl_is_used();",
- connstr => $common_connstr);
+$result = $node->safe_psql(
+ "certdb",
+ "SELECT ssl_is_used();",
+ connstr => $common_connstr);
is($result, 't', "ssl_is_used() for TLS connection");
-$result = $node->safe_psql("certdb", "SELECT ssl_version();",
- connstr => $common_connstr . " ssl_min_protocol_version=TLSv1.2 " .
- "ssl_max_protocol_version=TLSv1.2");
+$result = $node->safe_psql(
+ "certdb",
+ "SELECT ssl_version();",
+ connstr => $common_connstr
+ . " ssl_min_protocol_version=TLSv1.2 "
+ . "ssl_max_protocol_version=TLSv1.2");
is($result, 'TLSv1.2', "ssl_version() correctly returning TLS protocol");
-$result = $node->safe_psql("certdb",
- "SELECT ssl_cipher() = cipher FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
- connstr => $common_connstr);
+$result = $node->safe_psql(
+ "certdb",
+ "SELECT ssl_cipher() = cipher FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
+ connstr => $common_connstr);
is($result, 't', "ssl_cipher() compared with pg_stat_ssl");
-$result = $node->safe_psql("certdb", "SELECT ssl_client_cert_present();",
- connstr => $common_connstr);
+$result = $node->safe_psql(
+ "certdb",
+ "SELECT ssl_client_cert_present();",
+ connstr => $common_connstr);
is($result, 't', "ssl_client_cert_present() for connection with cert");
-$result = $node->safe_psql("trustdb", "SELECT ssl_client_cert_present();",
- connstr => "$default_ssl_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require " .
- "dbname=trustdb hostaddr=$SERVERHOSTADDR user=ssltestuser host=localhost");
+$result = $node->safe_psql(
+ "trustdb",
+ "SELECT ssl_client_cert_present();",
+ connstr =>
+ "$default_ssl_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require "
+ . "dbname=trustdb hostaddr=$SERVERHOSTADDR user=ssltestuser host=localhost"
+);
is($result, 'f', "ssl_client_cert_present() for connection without cert");
-$result = $node->safe_psql("certdb",
- "SELECT ssl_client_serial() = client_serial FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
- connstr => $common_connstr);
+$result = $node->safe_psql(
+ "certdb",
+ "SELECT ssl_client_serial() = client_serial FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
+ connstr => $common_connstr);
is($result, 't', "ssl_client_serial() compared with pg_stat_ssl");
# Must not use safe_psql since we expect an error here
-$result = $node->psql("certdb", "SELECT ssl_client_dn_field('invalid');",
- connstr => $common_connstr);
+$result = $node->psql(
+ "certdb",
+ "SELECT ssl_client_dn_field('invalid');",
+ connstr => $common_connstr);
is($result, '3', "ssl_client_dn_field() for an invalid field");
-$result = $node->safe_psql("trustdb", "SELECT ssl_client_dn_field('commonName');",
- connstr => "$default_ssl_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require " .
- "dbname=trustdb hostaddr=$SERVERHOSTADDR user=ssltestuser host=localhost");
+$result = $node->safe_psql(
+ "trustdb",
+ "SELECT ssl_client_dn_field('commonName');",
+ connstr =>
+ "$default_ssl_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require "
+ . "dbname=trustdb hostaddr=$SERVERHOSTADDR user=ssltestuser host=localhost"
+);
is($result, '', "ssl_client_dn_field() for connection without cert");
-$result = $node->safe_psql("certdb",
- "SELECT '/CN=' || ssl_client_dn_field('commonName') = client_dn FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
- connstr => $common_connstr);
+$result = $node->safe_psql(
+ "certdb",
+ "SELECT '/CN=' || ssl_client_dn_field('commonName') = client_dn FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
+ connstr => $common_connstr);
is($result, 't', "ssl_client_dn_field() for commonName");
-$result = $node->safe_psql("certdb",
- "SELECT ssl_issuer_dn() = issuer_dn FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
- connstr => $common_connstr);
+$result = $node->safe_psql(
+ "certdb",
+ "SELECT ssl_issuer_dn() = issuer_dn FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
+ connstr => $common_connstr);
is($result, 't', "ssl_issuer_dn() for connection with cert");
-$result = $node->safe_psql("certdb",
- "SELECT '/CN=' || ssl_issuer_field('commonName') = issuer_dn FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
- connstr => $common_connstr);
+$result = $node->safe_psql(
+ "certdb",
+ "SELECT '/CN=' || ssl_issuer_field('commonName') = issuer_dn FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
+ connstr => $common_connstr);
is($result, 't', "ssl_issuer_field() for commonName");
-$result = $node->safe_psql("certdb",
- "SELECT value, critical FROM ssl_extension_info() WHERE name = 'basicConstraints';",
- connstr => $common_connstr);
+$result = $node->safe_psql(
+ "certdb",
+ "SELECT value, critical FROM ssl_extension_info() WHERE name = 'basicConstraints';",
+ connstr => $common_connstr);
is($result, 'CA:FALSE|t', 'extract extension from cert');
done_testing();
diff --git a/src/test/ssl/t/SSL/Backend/OpenSSL.pm b/src/test/ssl/t/SSL/Backend/OpenSSL.pm
index d6d99fa636a..aed6005b432 100644
--- a/src/test/ssl/t/SSL/Backend/OpenSSL.pm
+++ b/src/test/ssl/t/SSL/Backend/OpenSSL.pm
@@ -84,7 +84,7 @@ sub init
# the tests. To get the full path for inclusion in connection strings, the
# %key hash can be interrogated.
my $cert_tempdir = PostgreSQL::Test::Utils::tempdir();
- my @keys = (
+ my @keys = (
"client.key", "client-revoked.key",
"client-der.key", "client-encrypted-pem.key",
"client-encrypted-der.key", "client-dn.key",
@@ -108,8 +108,10 @@ sub init
or die
"couldn't copy ssl/client_key to $cert_tempdir/client_wrongperms.key for permission change: $!";
chmod 0644, "$cert_tempdir/client_wrongperms.key"
- or die "failed to change permissions on $cert_tempdir/client_wrongperms.key: $!";
- $self->{key}->{'client_wrongperms.key'} = "$cert_tempdir/client_wrongperms.key";
+ or die
+ "failed to change permissions on $cert_tempdir/client_wrongperms.key: $!";
+ $self->{key}->{'client_wrongperms.key'} =
+ "$cert_tempdir/client_wrongperms.key";
$self->{key}->{'client_wrongperms.key'} =~ s!\\!/!g
if $PostgreSQL::Test::Utils::windows_os;
}
@@ -171,9 +173,10 @@ sub set_server_cert
{
my ($self, $params) = @_;
- $params->{cafile} = 'root+client_ca' unless defined $params->{cafile};
+ $params->{cafile} = 'root+client_ca' unless defined $params->{cafile};
$params->{crlfile} = 'root+client.crl' unless defined $params->{crlfile};
- $params->{keyfile} = $params->{certfile} unless defined $params->{keyfile};
+ $params->{keyfile} = $params->{certfile}
+ unless defined $params->{keyfile};
my $sslconf =
"ssl_ca_file='$params->{cafile}.crt'\n"
diff --git a/src/test/ssl/t/SSL/Server.pm b/src/test/ssl/t/SSL/Server.pm
index de460c2d96f..62f54dcbf16 100644
--- a/src/test/ssl/t/SSL/Server.pm
+++ b/src/test/ssl/t/SSL/Server.pm
@@ -94,7 +94,7 @@ sub new
bless $self, $class;
if ($flavor =~ /\Aopenssl\z/i)
{
- $self->{flavor} = 'openssl';
+ $self->{flavor} = 'openssl';
$self->{backend} = SSL::Backend::OpenSSL->new();
}
else
@@ -115,7 +115,7 @@ string.
sub sslkey
{
- my $self = shift;
+ my $self = shift;
my $keyfile = shift;
my $backend = $self->{backend};
@@ -140,12 +140,14 @@ C<listen_addresses> and B<cidr> for configuring C<pg_hba.conf>.
sub configure_test_server_for_ssl
{
- my $self=shift;
+ my $self = shift;
my ($node, $serverhost, $servercidr, $authmethod, %params) = @_;
my $backend = $self->{backend};
- my $pgdata = $node->data_dir;
+ my $pgdata = $node->data_dir;
- my @databases = ( 'trustdb', 'certdb', 'certdb_dn', 'certdb_dn_re', 'certdb_cn', 'verifydb' );
+ my @databases = (
+ 'trustdb', 'certdb', 'certdb_dn', 'certdb_dn_re',
+ 'certdb_cn', 'verifydb');
# Create test users and databases
$node->psql('postgres', "CREATE USER ssltestuser");
@@ -162,7 +164,7 @@ sub configure_test_server_for_ssl
if (defined($params{password}))
{
die "Password encryption must be specified when password is set"
- unless defined($params{password_enc});
+ unless defined($params{password_enc});
$node->psql('postgres',
"SET password_encryption='$params{password_enc}'; ALTER USER ssltestuser PASSWORD '$params{password}';"
@@ -179,7 +181,7 @@ sub configure_test_server_for_ssl
# Create any extensions requested in the setup
if (defined($params{extensions}))
{
- foreach my $extension (@{$params{extensions}})
+ foreach my $extension (@{ $params{extensions} })
{
foreach my $db (@databases)
{
@@ -227,7 +229,7 @@ Get the name of the currently used SSL backend.
sub ssl_library
{
- my $self = shift;
+ my $self = shift;
my $backend = $self->{backend};
return $backend->get_library();
@@ -282,16 +284,17 @@ returning.
sub switch_server_cert
{
- my $self = shift;
- my $node = shift;
+ my $self = shift;
+ my $node = shift;
my $backend = $self->{backend};
- my %params = @_;
- my $pgdata = $node->data_dir;
+ my %params = @_;
+ my $pgdata = $node->data_dir;
open my $sslconf, '>', "$pgdata/sslconfig.conf";
print $sslconf "ssl=on\n";
print $sslconf $backend->set_server_cert(\%params);
- print $sslconf "ssl_passphrase_command='" . $params{passphrase_cmd} . "'\n"
+ print $sslconf "ssl_passphrase_command='"
+ . $params{passphrase_cmd} . "'\n"
if defined $params{passphrase_cmd};
close $sslconf;
diff --git a/src/test/subscription/t/001_rep_changes.pl b/src/test/subscription/t/001_rep_changes.pl
index d35a133f154..f53b3b7db0c 100644
--- a/src/test/subscription/t/001_rep_changes.pl
+++ b/src/test/subscription/t/001_rep_changes.pl
@@ -427,7 +427,9 @@ $node_subscriber->safe_psql('postgres',
);
$node_publisher->poll_query_until('postgres',
"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = 'tap_sub' AND state = 'streaming';"
-) or die "Timed out while waiting for apply to restart after changing CONNECTION";
+ )
+ or die
+ "Timed out while waiting for apply to restart after changing CONNECTION";
$oldpid = $node_publisher->safe_psql('postgres',
"SELECT pid FROM pg_stat_replication WHERE application_name = 'tap_sub' AND state = 'streaming';"
@@ -437,7 +439,9 @@ $node_subscriber->safe_psql('postgres',
);
$node_publisher->poll_query_until('postgres',
"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = 'tap_sub' AND state = 'streaming';"
-) or die "Timed out while waiting for apply to restart after changing PUBLICATION";
+ )
+ or die
+ "Timed out while waiting for apply to restart after changing PUBLICATION";
$node_publisher->safe_psql('postgres',
"INSERT INTO tab_ins SELECT generate_series(1001,1100)");
@@ -489,16 +493,14 @@ $node_publisher->safe_psql('postgres', "INSERT INTO tab_notrep VALUES (11)");
$node_publisher->wait_for_catchup('tap_sub');
$logfile = slurp_file($node_publisher->logfile, $log_location);
-ok( $logfile =~
- qr/skipped replication of an empty transaction with XID/,
+ok($logfile =~ qr/skipped replication of an empty transaction with XID/,
'empty transaction is skipped');
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT count(*) FROM tab_notrep");
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_notrep");
is($result, qq(0), 'check non-replicated table is empty on subscriber');
-$node_publisher->append_conf('postgresql.conf',
- "log_min_messages = warning");
+$node_publisher->append_conf('postgresql.conf', "log_min_messages = warning");
$node_publisher->reload;
# note that data are different on provider and subscriber
@@ -519,7 +521,9 @@ $node_subscriber->safe_psql('postgres',
"ALTER SUBSCRIPTION tap_sub RENAME TO tap_sub_renamed");
$node_publisher->poll_query_until('postgres',
"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = 'tap_sub_renamed' AND state = 'streaming';"
-) or die "Timed out while waiting for apply to restart after renaming SUBSCRIPTION";
+ )
+ or die
+ "Timed out while waiting for apply to restart after renaming SUBSCRIPTION";
# check all the cleanup
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_renamed");
diff --git a/src/test/subscription/t/007_ddl.pl b/src/test/subscription/t/007_ddl.pl
index 39c32eda44d..cdd6b119ffb 100644
--- a/src/test/subscription/t/007_ddl.pl
+++ b/src/test/subscription/t/007_ddl.pl
@@ -62,21 +62,21 @@ $node_subscriber->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data";
# Specifying non-existent publication along with add publication.
-($ret, $stdout, $stderr) = $node_subscriber->psql(
- 'postgres',
+($ret, $stdout, $stderr) = $node_subscriber->psql('postgres',
"ALTER SUBSCRIPTION mysub1 ADD PUBLICATION non_existent_pub1, non_existent_pub2"
);
ok( $stderr =~
m/WARNING: publications "non_existent_pub1", "non_existent_pub2" do not exist in the publisher/,
- "Alter subscription add publication throws warning for non-existent publications");
+ "Alter subscription add publication throws warning for non-existent publications"
+);
# Specifying non-existent publication along with set publication.
($ret, $stdout, $stderr) = $node_subscriber->psql('postgres',
- "ALTER SUBSCRIPTION mysub1 SET PUBLICATION non_existent_pub"
-);
+ "ALTER SUBSCRIPTION mysub1 SET PUBLICATION non_existent_pub");
ok( $stderr =~
m/WARNING: publication "non_existent_pub" does not exist in the publisher/,
- "Alter subscription set publication throws warning for non-existent publication");
+ "Alter subscription set publication throws warning for non-existent publication"
+);
$node_subscriber->stop;
$node_publisher->stop;
diff --git a/src/test/subscription/t/013_partition.pl b/src/test/subscription/t/013_partition.pl
index 66e63e755ef..e7f4a94f197 100644
--- a/src/test/subscription/t/013_partition.pl
+++ b/src/test/subscription/t/013_partition.pl
@@ -413,7 +413,8 @@ $node_publisher->safe_psql('postgres',
$node_publisher->safe_psql('postgres',
"CREATE TABLE tab4 (a int PRIMARY KEY) PARTITION BY LIST (a)");
$node_publisher->safe_psql('postgres',
- "CREATE TABLE tab4_1 PARTITION OF tab4 FOR VALUES IN (0, 1) PARTITION BY LIST (a)");
+ "CREATE TABLE tab4_1 PARTITION OF tab4 FOR VALUES IN (0, 1) PARTITION BY LIST (a)"
+);
$node_publisher->safe_psql('postgres',
"CREATE TABLE tab4_1_1 PARTITION OF tab4_1 FOR VALUES IN (0, 1)");
@@ -479,11 +480,9 @@ $node_subscriber2->safe_psql('postgres',
# Note: We create two separate tables, not a partitioned one, so that we can
 # easily identify through which relation were the changes replicated.
$node_subscriber2->safe_psql('postgres',
- "CREATE TABLE tab4 (a int PRIMARY KEY)"
-);
+ "CREATE TABLE tab4 (a int PRIMARY KEY)");
$node_subscriber2->safe_psql('postgres',
- "CREATE TABLE tab4_1 (a int PRIMARY KEY)"
-);
+ "CREATE TABLE tab4_1 (a int PRIMARY KEY)");
# Publication that sub2 points to now publishes via root, so must update
# subscription target relations. We set the list of publications so that
# the FOR ALL TABLES publication is second (the list order matters).
@@ -497,9 +496,8 @@ $node_subscriber2->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data";
# check that data is synced correctly
-$result = $node_subscriber1->safe_psql('postgres',
- "SELECT c, a FROM tab2");
-is( $result, qq(sub1_tab2|1), 'initial data synced for pub_viaroot');
+$result = $node_subscriber1->safe_psql('postgres', "SELECT c, a FROM tab2");
+is($result, qq(sub1_tab2|1), 'initial data synced for pub_viaroot');
# insert
$node_publisher->safe_psql('postgres', "INSERT INTO tab1 VALUES (1), (0)");
@@ -512,8 +510,7 @@ $node_publisher->safe_psql('postgres',
# Insert a row into the leaf partition, should be replicated through the
# partition root (thanks to the FOR ALL TABLES partition).
-$node_publisher->safe_psql('postgres',
- "INSERT INTO tab4 VALUES (0)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab4 VALUES (0)");
$node_publisher->wait_for_catchup('sub_viaroot');
$node_publisher->wait_for_catchup('sub2');
@@ -555,13 +552,13 @@ sub2_tab3|5), 'inserts into tab3 replicated');
# tab4 change should be replicated through the root partition, which
# maps to the tab4 relation on subscriber.
-$result = $node_subscriber2->safe_psql('postgres',
- "SELECT a FROM tab4 ORDER BY 1");
-is( $result, qq(0), 'inserts into tab4 replicated');
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab4 ORDER BY 1");
+is($result, qq(0), 'inserts into tab4 replicated');
-$result = $node_subscriber2->safe_psql('postgres',
- "SELECT a FROM tab4_1 ORDER BY 1");
-is( $result, qq(), 'inserts into tab4_1 replicated');
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab4_1 ORDER BY 1");
+is($result, qq(), 'inserts into tab4_1 replicated');
# now switch the order of publications in the list, try again, the result
@@ -576,21 +573,20 @@ $node_subscriber2->poll_query_until('postgres', $synced_query)
# Insert a change into the leaf partition, should be replicated through
# the partition root (thanks to the FOR ALL TABLES partition).
-$node_publisher->safe_psql('postgres',
- "INSERT INTO tab4 VALUES (1)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab4 VALUES (1)");
$node_publisher->wait_for_catchup('sub2');
# tab4 change should be replicated through the root partition, which
# maps to the tab4 relation on subscriber.
-$result = $node_subscriber2->safe_psql('postgres',
- "SELECT a FROM tab4 ORDER BY 1");
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab4 ORDER BY 1");
is( $result, qq(0
1), 'inserts into tab4 replicated');
-$result = $node_subscriber2->safe_psql('postgres',
- "SELECT a FROM tab4_1 ORDER BY 1");
-is( $result, qq(), 'inserts into tab4_1 replicated');
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab4_1 ORDER BY 1");
+is($result, qq(), 'inserts into tab4_1 replicated');
# update (replicated as update)
diff --git a/src/test/subscription/t/021_twophase.pl b/src/test/subscription/t/021_twophase.pl
index aacc0fcf462..c3e9857f7ce 100644
--- a/src/test/subscription/t/021_twophase.pl
+++ b/src/test/subscription/t/021_twophase.pl
@@ -29,7 +29,8 @@ $node_subscriber->start;
# Create some pre-existing content on publisher
$node_publisher->safe_psql('postgres',
"CREATE TABLE tab_full (a int PRIMARY KEY)");
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full SELECT generate_series(1,10);
PREPARE TRANSACTION 'some_initial_data';
@@ -45,7 +46,8 @@ $node_publisher->safe_psql('postgres',
"CREATE PUBLICATION tap_pub FOR TABLE tab_full");
my $appname = 'tap_sub';
-$node_subscriber->safe_psql('postgres', "
+$node_subscriber->safe_psql(
+ 'postgres', "
CREATE SUBSCRIPTION tap_sub
CONNECTION '$publisher_connstr application_name=$appname'
PUBLICATION tap_pub
@@ -56,13 +58,13 @@ $node_publisher->wait_for_catchup($appname);
# Also wait for initial table sync to finish
my $synced_query =
- "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
+ "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
$node_subscriber->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data";
# Also wait for two-phase to be enabled
my $twophase_query =
- "SELECT count(1) = 0 FROM pg_subscription WHERE subtwophasestate NOT IN ('e');";
+ "SELECT count(1) = 0 FROM pg_subscription WHERE subtwophasestate NOT IN ('e');";
$node_subscriber->poll_query_until('postgres', $twophase_query)
or die "Timed out while waiting for subscriber to enable twophase";
@@ -71,7 +73,8 @@ $node_subscriber->poll_query_until('postgres', $twophase_query)
# then COMMIT PREPARED
###############################
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (11);
PREPARE TRANSACTION 'test_prepared_tab_full';");
@@ -79,19 +82,23 @@ $node_publisher->safe_psql('postgres', "
$node_publisher->wait_for_catchup($appname);
# check that transaction is in prepared state on subscriber
-my $result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+my $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber');
# check that 2PC gets committed on subscriber
-$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab_full';");
+$node_publisher->safe_psql('postgres',
+ "COMMIT PREPARED 'test_prepared_tab_full';");
$node_publisher->wait_for_catchup($appname);
# check that transaction is committed on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_full where a = 11;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a = 11;");
is($result, qq(1), 'Row inserted via 2PC has committed on subscriber');
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is committed on subscriber');
###############################
@@ -99,7 +106,8 @@ is($result, qq(0), 'transaction is committed on subscriber');
# then ROLLBACK PREPARED
###############################
-$node_publisher->safe_psql('postgres',"
+$node_publisher->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (12);
PREPARE TRANSACTION 'test_prepared_tab_full';");
@@ -107,19 +115,23 @@ $node_publisher->safe_psql('postgres',"
$node_publisher->wait_for_catchup($appname);
# check that transaction is in prepared state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber');
# check that 2PC gets aborted on subscriber
-$node_publisher->safe_psql('postgres', "ROLLBACK PREPARED 'test_prepared_tab_full';");
+$node_publisher->safe_psql('postgres',
+ "ROLLBACK PREPARED 'test_prepared_tab_full';");
$node_publisher->wait_for_catchup($appname);
# check that transaction is aborted on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_full where a = 12;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a = 12;");
is($result, qq(0), 'Row inserted via 2PC is not present on subscriber');
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is aborted on subscriber');
###############################
@@ -127,7 +139,8 @@ is($result, qq(0), 'transaction is aborted on subscriber');
# (publisher and subscriber crash)
###############################
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (12);
INSERT INTO tab_full VALUES (13);
@@ -140,11 +153,13 @@ $node_publisher->start;
$node_subscriber->start;
# rollback post the restart
-$node_publisher->safe_psql('postgres', "ROLLBACK PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+ "ROLLBACK PREPARED 'test_prepared_tab';");
$node_publisher->wait_for_catchup($appname);
# check inserts are rolled back
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_full where a IN (12,13);");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a IN (12,13);");
is($result, qq(0), 'Rows rolled back are not on the subscriber');
###############################
@@ -152,7 +167,8 @@ is($result, qq(0), 'Rows rolled back are not on the subscriber');
# (publisher and subscriber crash)
###############################
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (12);
INSERT INTO tab_full VALUES (13);
@@ -165,11 +181,13 @@ $node_publisher->start;
$node_subscriber->start;
# commit post the restart
-$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+ "COMMIT PREPARED 'test_prepared_tab';");
$node_publisher->wait_for_catchup($appname);
# check inserts are visible
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_full where a IN (12,13);");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a IN (12,13);");
is($result, qq(2), 'Rows inserted via 2PC are visible on the subscriber');
###############################
@@ -177,7 +195,8 @@ is($result, qq(2), 'Rows inserted via 2PC are visible on the subscriber');
# (subscriber only crash)
###############################
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (14);
INSERT INTO tab_full VALUES (15);
@@ -187,11 +206,13 @@ $node_subscriber->stop('immediate');
$node_subscriber->start;
# commit post the restart
-$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+ "COMMIT PREPARED 'test_prepared_tab';");
$node_publisher->wait_for_catchup($appname);
# check inserts are visible
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_full where a IN (14,15);");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a IN (14,15);");
is($result, qq(2), 'Rows inserted via 2PC are visible on the subscriber');
###############################
@@ -199,7 +220,8 @@ is($result, qq(2), 'Rows inserted via 2PC are visible on the subscriber');
# (publisher only crash)
###############################
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (16);
INSERT INTO tab_full VALUES (17);
@@ -209,11 +231,13 @@ $node_publisher->stop('immediate');
$node_publisher->start;
# commit post the restart
-$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+ "COMMIT PREPARED 'test_prepared_tab';");
$node_publisher->wait_for_catchup($appname);
# check inserts are visible
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_full where a IN (16,17);");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a IN (16,17);");
is($result, qq(2), 'Rows inserted via 2PC are visible on the subscriber');
###############################
@@ -221,7 +245,8 @@ is($result, qq(2), 'Rows inserted via 2PC are visible on the subscriber');
###############################
# check that 2PC gets replicated to subscriber
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (21);
SAVEPOINT sp_inner;
@@ -232,7 +257,8 @@ $node_publisher->safe_psql('postgres', "
$node_publisher->wait_for_catchup($appname);
# check that transaction is in prepared state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber');
# COMMIT
@@ -241,11 +267,13 @@ $node_publisher->safe_psql('postgres', "COMMIT PREPARED 'outer';");
$node_publisher->wait_for_catchup($appname);
# check the transaction state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is ended on subscriber');
# check inserts are visible. 22 should be rolled back. 21 should be committed.
-$result = $node_subscriber->safe_psql('postgres', "SELECT a FROM tab_full where a IN (21,22);");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT a FROM tab_full where a IN (21,22);");
is($result, qq(21), 'Rows committed are on the subscriber');
###############################
@@ -253,14 +281,16 @@ is($result, qq(21), 'Rows committed are on the subscriber');
###############################
# check that 2PC gets replicated to subscriber
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (51);
PREPARE TRANSACTION '';");
$node_publisher->wait_for_catchup($appname);
# check that transaction is in prepared state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber');
# ROLLBACK
@@ -269,7 +299,8 @@ $node_publisher->safe_psql('postgres', "ROLLBACK PREPARED '';");
# check that 2PC gets aborted on subscriber
$node_publisher->wait_for_catchup($appname);
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is aborted on subscriber');
###############################
@@ -277,11 +308,15 @@ is($result, qq(0), 'transaction is aborted on subscriber');
###############################
#create some test tables for copy tests
-$node_publisher->safe_psql('postgres', "CREATE TABLE tab_copy (a int PRIMARY KEY)");
-$node_publisher->safe_psql('postgres', "INSERT INTO tab_copy SELECT generate_series(1,5);");
-$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_copy (a int PRIMARY KEY)");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_copy (a int PRIMARY KEY)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_copy SELECT generate_series(1,5);");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_copy (a int PRIMARY KEY)");
$node_subscriber->safe_psql('postgres', "INSERT INTO tab_copy VALUES (88);");
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
is($result, qq(1), 'initial data in subscriber table');
# Setup logical replication
@@ -289,7 +324,8 @@ $node_publisher->safe_psql('postgres',
"CREATE PUBLICATION tap_pub_copy FOR TABLE tab_copy;");
my $appname_copy = 'appname_copy';
-$node_subscriber->safe_psql('postgres', "
+$node_subscriber->safe_psql(
+ 'postgres', "
CREATE SUBSCRIPTION tap_sub_copy
CONNECTION '$publisher_connstr application_name=$appname_copy'
PUBLICATION tap_pub_copy
@@ -307,11 +343,13 @@ $node_subscriber->poll_query_until('postgres', $twophase_query)
or die "Timed out while waiting for subscriber to enable twophase";
# Check that the initial table data was NOT replicated (because we said copy_data=false)
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
is($result, qq(1), 'initial data in subscriber table');
# Now do a prepare on publisher and check that it IS replicated
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_copy VALUES (99);
PREPARE TRANSACTION 'mygid';");
@@ -322,18 +360,21 @@ $node_publisher->wait_for_catchup($appname);
# Check that the transaction has been prepared on the subscriber, there will be 2
# prepared transactions for the 2 subscriptions.
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(2), 'transaction is prepared on subscriber');
# Now commit the insert and verify that it IS replicated
$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'mygid';");
-$result = $node_publisher->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
+$result =
+ $node_publisher->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
is($result, qq(6), 'publisher inserted data');
$node_publisher->wait_for_catchup($appname_copy);
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
is($result, qq(2), 'replicated data in subscriber table');
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_copy;");
@@ -345,16 +386,21 @@ $node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_copy;");
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription");
is($result, qq(0), 'check subscription was dropped on subscriber');
-$result = $node_publisher->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
+$result = $node_publisher->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_slots");
is($result, qq(0), 'check replication slot was dropped on publisher');
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
-is($result, qq(0), 'check subscription relation status was dropped on subscriber');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription_rel");
+is($result, qq(0),
+ 'check subscription relation status was dropped on subscriber');
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_replication_origin");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_origin");
is($result, qq(0), 'check replication origin was dropped on subscriber');
$node_subscriber->stop('fast');
diff --git a/src/test/subscription/t/022_twophase_cascade.pl b/src/test/subscription/t/022_twophase_cascade.pl
index 900c25d5ce2..7a797f37bad 100644
--- a/src/test/subscription/t/022_twophase_cascade.pl
+++ b/src/test/subscription/t/022_twophase_cascade.pl
@@ -20,7 +20,8 @@ use Test::More;
# node_A
my $node_A = PostgreSQL::Test::Cluster->new('node_A');
$node_A->init(allows_streaming => 'logical');
-$node_A->append_conf('postgresql.conf', qq(
+$node_A->append_conf(
+ 'postgresql.conf', qq(
max_prepared_transactions = 10
logical_decoding_work_mem = 64kB
));
@@ -28,7 +29,8 @@ $node_A->start;
# node_B
my $node_B = PostgreSQL::Test::Cluster->new('node_B');
$node_B->init(allows_streaming => 'logical');
-$node_B->append_conf('postgresql.conf', qq(
+$node_B->append_conf(
+ 'postgresql.conf', qq(
max_prepared_transactions = 10
logical_decoding_work_mem = 64kB
));
@@ -36,23 +38,22 @@ $node_B->start;
# node_C
my $node_C = PostgreSQL::Test::Cluster->new('node_C');
$node_C->init(allows_streaming => 'logical');
-$node_C->append_conf('postgresql.conf', qq(
+$node_C->append_conf(
+ 'postgresql.conf', qq(
max_prepared_transactions = 10
logical_decoding_work_mem = 64kB
));
$node_C->start;
# Create some pre-existing content on node_A
-$node_A->safe_psql('postgres',
- "CREATE TABLE tab_full (a int PRIMARY KEY)");
-$node_A->safe_psql('postgres', "
+$node_A->safe_psql('postgres', "CREATE TABLE tab_full (a int PRIMARY KEY)");
+$node_A->safe_psql(
+ 'postgres', "
INSERT INTO tab_full SELECT generate_series(1,10);");
# Create the same tables on node_B and node_C
-$node_B->safe_psql('postgres',
- "CREATE TABLE tab_full (a int PRIMARY KEY)");
-$node_C->safe_psql('postgres',
- "CREATE TABLE tab_full (a int PRIMARY KEY)");
+$node_B->safe_psql('postgres', "CREATE TABLE tab_full (a int PRIMARY KEY)");
+$node_C->safe_psql('postgres', "CREATE TABLE tab_full (a int PRIMARY KEY)");
# Create some pre-existing content on node_A (for streaming tests)
$node_A->safe_psql('postgres',
@@ -63,9 +64,11 @@ $node_A->safe_psql('postgres',
# Create the same tables on node_B and node_C
# columns a and b are compatible with same table name on node_A
$node_B->safe_psql('postgres',
- "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)");
+ "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)"
+);
$node_C->safe_psql('postgres',
- "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)");
+ "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)"
+);
# Setup logical replication
@@ -78,7 +81,8 @@ my $node_A_connstr = $node_A->connstr . ' dbname=postgres';
$node_A->safe_psql('postgres',
"CREATE PUBLICATION tap_pub_A FOR TABLE tab_full, test_tab");
my $appname_B = 'tap_sub_B';
-$node_B->safe_psql('postgres', "
+$node_B->safe_psql(
+ 'postgres', "
CREATE SUBSCRIPTION tap_sub_B
CONNECTION '$node_A_connstr application_name=$appname_B'
PUBLICATION tap_pub_A
@@ -89,7 +93,8 @@ my $node_B_connstr = $node_B->connstr . ' dbname=postgres';
$node_B->safe_psql('postgres',
"CREATE PUBLICATION tap_pub_B FOR TABLE tab_full, test_tab");
my $appname_C = 'tap_sub_C';
-$node_C->safe_psql('postgres', "
+$node_C->safe_psql(
+ 'postgres', "
CREATE SUBSCRIPTION tap_sub_C
CONNECTION '$node_B_connstr application_name=$appname_C'
PUBLICATION tap_pub_B
@@ -100,13 +105,14 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# Also wait for two-phase to be enabled
-my $twophase_query = "SELECT count(1) = 0 FROM pg_subscription WHERE subtwophasestate NOT IN ('e');";
+my $twophase_query =
+ "SELECT count(1) = 0 FROM pg_subscription WHERE subtwophasestate NOT IN ('e');";
$node_B->poll_query_until('postgres', $twophase_query)
- or die "Timed out while waiting for subscriber to enable twophase";
+ or die "Timed out while waiting for subscriber to enable twophase";
$node_C->poll_query_until('postgres', $twophase_query)
- or die "Timed out while waiting for subscriber to enable twophase";
+ or die "Timed out while waiting for subscriber to enable twophase";
-is(1,1, "Cascade setup is complete");
+is(1, 1, "Cascade setup is complete");
my $result;
@@ -116,7 +122,8 @@ my $result;
###############################
# 2PC PREPARE
-$node_A->safe_psql('postgres', "
+$node_A->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (11);
PREPARE TRANSACTION 'test_prepared_tab_full';");
@@ -125,9 +132,11 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# check the transaction state is prepared on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber C');
# 2PC COMMIT
@@ -137,15 +146,19 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# check that transaction was committed on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM tab_full where a = 11;");
+$result = $node_B->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a = 11;");
is($result, qq(1), 'Row inserted via 2PC has committed on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM tab_full where a = 11;");
+$result = $node_C->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a = 11;");
is($result, qq(1), 'Row inserted via 2PC has committed on subscriber C');
# check the transaction state is ended on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is committed on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is committed on subscriber C');
###############################
@@ -154,7 +167,8 @@ is($result, qq(0), 'transaction is committed on subscriber C');
###############################
# 2PC PREPARE
-$node_A->safe_psql('postgres', "
+$node_A->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (12);
PREPARE TRANSACTION 'test_prepared_tab_full';");
@@ -163,9 +177,11 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# check the transaction state is prepared on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber C');
# 2PC ROLLBACK
@@ -175,15 +191,19 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# check that transaction is aborted on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM tab_full where a = 12;");
+$result = $node_B->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a = 12;");
is($result, qq(0), 'Row inserted via 2PC is not present on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM tab_full where a = 12;");
+$result = $node_C->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a = 12;");
is($result, qq(0), 'Row inserted via 2PC is not present on subscriber C');
# check the transaction state is ended on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is ended on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is ended on subscriber C');
###############################
@@ -191,7 +211,8 @@ is($result, qq(0), 'transaction is ended on subscriber C');
###############################
# 2PC PREPARE with a nested ROLLBACK TO SAVEPOINT
-$node_A->safe_psql('postgres', "
+$node_A->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (21);
SAVEPOINT sp_inner;
@@ -204,9 +225,11 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# check the transaction state prepared on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber C');
# 2PC COMMIT
@@ -216,46 +239,56 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# check the transaction state is ended on subscriber
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is ended on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is ended on subscriber C');
# check inserts are visible at subscriber(s).
# 22 should be rolled back.
# 21 should be committed.
-$result = $node_B->safe_psql('postgres', "SELECT a FROM tab_full where a IN (21,22);");
+$result = $node_B->safe_psql('postgres',
+ "SELECT a FROM tab_full where a IN (21,22);");
is($result, qq(21), 'Rows committed are present on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT a FROM tab_full where a IN (21,22);");
+$result = $node_C->safe_psql('postgres',
+ "SELECT a FROM tab_full where a IN (21,22);");
is($result, qq(21), 'Rows committed are present on subscriber C');
# ---------------------
# 2PC + STREAMING TESTS
# ---------------------
-my $oldpid_B = $node_A->safe_psql('postgres', "
+my $oldpid_B = $node_A->safe_psql(
+ 'postgres', "
SELECT pid FROM pg_stat_replication
WHERE application_name = '$appname_B' AND state = 'streaming';");
-my $oldpid_C = $node_B->safe_psql('postgres', "
+my $oldpid_C = $node_B->safe_psql(
+ 'postgres', "
SELECT pid FROM pg_stat_replication
WHERE application_name = '$appname_C' AND state = 'streaming';");
# Setup logical replication (streaming = on)
-$node_B->safe_psql('postgres', "
+$node_B->safe_psql(
+ 'postgres', "
ALTER SUBSCRIPTION tap_sub_B
SET (streaming = on);");
-$node_C->safe_psql('postgres', "
+$node_C->safe_psql(
+ 'postgres', "
ALTER SUBSCRIPTION tap_sub_C
SET (streaming = on)");
# Wait for subscribers to finish initialization
-$node_A->poll_query_until('postgres', "
+$node_A->poll_query_until(
+ 'postgres', "
SELECT pid != $oldpid_B FROM pg_stat_replication
WHERE application_name = '$appname_B' AND state = 'streaming';"
) or die "Timed out while waiting for apply to restart";
-$node_B->poll_query_until('postgres', "
+$node_B->poll_query_until(
+ 'postgres', "
SELECT pid != $oldpid_C FROM pg_stat_replication
WHERE application_name = '$appname_C' AND state = 'streaming';"
) or die "Timed out while waiting for apply to restart";
@@ -270,7 +303,8 @@ $node_B->poll_query_until('postgres', "
# Insert, update and delete enough rows to exceed the 64kB limit.
# Then 2PC PREPARE
-$node_A->safe_psql('postgres', q{
+$node_A->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
@@ -281,9 +315,11 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# check the transaction state is prepared on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber C');
# 2PC COMMIT
@@ -293,15 +329,23 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# check that transaction was committed on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
-is($result, qq(3334|3334|3334), 'Rows inserted by 2PC have committed on subscriber B, and extra columns have local defaults');
-$result = $node_C->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
-is($result, qq(3334|3334|3334), 'Rows inserted by 2PC have committed on subscriber C, and extra columns have local defaults');
+$result = $node_B->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(3334|3334|3334),
+ 'Rows inserted by 2PC have committed on subscriber B, and extra columns have local defaults'
+);
+$result = $node_C->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(3334|3334|3334),
+ 'Rows inserted by 2PC have committed on subscriber C, and extra columns have local defaults'
+);
# check the transaction state is ended on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is committed on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is committed on subscriber C');
###############################
@@ -320,7 +364,8 @@ is($result, qq(0), 'transaction is committed on subscriber C');
$node_A->safe_psql('postgres', "DELETE FROM test_tab WHERE a > 2;");
# 2PC PREPARE with a nested ROLLBACK TO SAVEPOINT
-$node_A->safe_psql('postgres', "
+$node_A->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO test_tab VALUES (9999, 'foobar');
SAVEPOINT sp_inner;
@@ -335,9 +380,11 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# check the transaction state prepared on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber C');
# 2PC COMMIT
@@ -347,19 +394,23 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# check the transaction state is ended on subscriber
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is ended on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is ended on subscriber C');
# check inserts are visible at subscriber(s).
# All the streamed data (prior to the SAVEPOINT) should be rolled back.
# (9999, 'foobar') should be committed.
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM test_tab where b = 'foobar';");
+$result = $node_B->safe_psql('postgres',
+ "SELECT count(*) FROM test_tab where b = 'foobar';");
is($result, qq(1), 'Rows committed are present on subscriber B');
$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM test_tab;");
is($result, qq(3), 'Rows committed are present on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM test_tab where b = 'foobar';");
+$result = $node_C->safe_psql('postgres',
+ "SELECT count(*) FROM test_tab where b = 'foobar';");
is($result, qq(1), 'Rows committed are present on subscriber C');
$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM test_tab;");
is($result, qq(3), 'Rows committed are present on subscriber C');
@@ -370,24 +421,36 @@ is($result, qq(3), 'Rows committed are present on subscriber C');
# cleanup the node_B => node_C pub/sub
$node_C->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_C");
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
is($result, qq(0), 'check subscription was dropped on subscriber node C');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
-is($result, qq(0), 'check subscription relation status was dropped on subscriber node C');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_replication_origin");
-is($result, qq(0), 'check replication origin was dropped on subscriber node C');
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
+is($result, qq(0),
+ 'check subscription relation status was dropped on subscriber node C');
+$result = $node_C->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_origin");
+is($result, qq(0),
+ 'check replication origin was dropped on subscriber node C');
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
is($result, qq(0), 'check replication slot was dropped on publisher node B');
# cleanup the node_A => node_B pub/sub
$node_B->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_B");
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
is($result, qq(0), 'check subscription was dropped on subscriber node B');
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
-is($result, qq(0), 'check subscription relation status was dropped on subscriber node B');
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_replication_origin");
-is($result, qq(0), 'check replication origin was dropped on subscriber node B');
-$result = $node_A->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
+is($result, qq(0),
+ 'check subscription relation status was dropped on subscriber node B');
+$result = $node_B->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_origin");
+is($result, qq(0),
+ 'check replication origin was dropped on subscriber node B');
+$result =
+ $node_A->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
is($result, qq(0), 'check replication slot was dropped on publisher node A');
# shutdown
diff --git a/src/test/subscription/t/023_twophase_stream.pl b/src/test/subscription/t/023_twophase_stream.pl
index 93ce3ef132d..d8475d25a49 100644
--- a/src/test/subscription/t/023_twophase_stream.pl
+++ b/src/test/subscription/t/023_twophase_stream.pl
@@ -15,7 +15,8 @@ use Test::More;
# Initialize publisher node
my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
$node_publisher->init(allows_streaming => 'logical');
-$node_publisher->append_conf('postgresql.conf', qq(
+$node_publisher->append_conf(
+ 'postgresql.conf', qq(
max_prepared_transactions = 10
logical_decoding_work_mem = 64kB
));
@@ -24,25 +25,31 @@ $node_publisher->start;
# Create subscriber node
my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
$node_subscriber->init(allows_streaming => 'logical');
-$node_subscriber->append_conf('postgresql.conf', qq(
+$node_subscriber->append_conf(
+ 'postgresql.conf', qq(
max_prepared_transactions = 10
));
$node_subscriber->start;
# Create some pre-existing content on publisher
-$node_publisher->safe_psql('postgres', "CREATE TABLE test_tab (a int primary key, b varchar)");
-$node_publisher->safe_psql('postgres', "INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b varchar)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')");
# Setup structure on subscriber (columns a and b are compatible with same table name on publisher)
$node_subscriber->safe_psql('postgres',
- "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)");
+ "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)"
+);
# Setup logical replication (streaming = on)
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
-$node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub FOR TABLE test_tab");
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR TABLE test_tab");
my $appname = 'tap_sub';
-$node_subscriber->safe_psql('postgres', "
+$node_subscriber->safe_psql(
+ 'postgres', "
CREATE SUBSCRIPTION tap_sub
CONNECTION '$publisher_connstr application_name=$appname'
PUBLICATION tap_pub
@@ -53,20 +60,21 @@ $node_publisher->wait_for_catchup($appname);
# Also wait for initial table sync to finish
my $synced_query =
- "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
+ "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
$node_subscriber->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data";
# Also wait for two-phase to be enabled
my $twophase_query =
- "SELECT count(1) = 0 FROM pg_subscription WHERE subtwophasestate NOT IN ('e');";
+ "SELECT count(1) = 0 FROM pg_subscription WHERE subtwophasestate NOT IN ('e');";
$node_subscriber->poll_query_until('postgres', $twophase_query)
or die "Timed out while waiting for subscriber to enable twophase";
###############################
# Check initial data was copied to subscriber
###############################
-my $result = $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+my $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
is($result, qq(2|2|2), 'check initial data was copied to subscriber');
###############################
@@ -79,7 +87,8 @@ is($result, qq(2|2|2), 'check initial data was copied to subscriber');
# check that 2PC gets replicated to subscriber
# Insert, update and delete enough rows to exceed the 64kB limit.
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
@@ -89,18 +98,24 @@ $node_publisher->safe_psql('postgres', q{
$node_publisher->wait_for_catchup($appname);
# check that transaction is in prepared state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber');
# 2PC transaction gets committed
-$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+ "COMMIT PREPARED 'test_prepared_tab';");
$node_publisher->wait_for_catchup($appname);
# check that transaction is committed on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
-is($result, qq(3334|3334|3334), 'Rows inserted by 2PC have committed on subscriber, and extra columns contain local defaults');
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(3334|3334|3334),
+ 'Rows inserted by 2PC have committed on subscriber, and extra columns contain local defaults'
+);
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is committed on subscriber');
###############################
@@ -113,10 +128,11 @@ is($result, qq(0), 'transaction is committed on subscriber');
###############################
# First, delete the data except for 2 rows (will be replicated)
-$node_publisher->safe_psql('postgres', "DELETE FROM test_tab WHERE a > 2;");
+$node_publisher->safe_psql('postgres', "DELETE FROM test_tab WHERE a > 2;");
# Then insert, update and delete enough rows to exceed the 64kB limit.
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
@@ -126,19 +142,24 @@ $node_publisher->safe_psql('postgres', q{
$node_publisher->wait_for_catchup($appname);
# check that transaction is in prepared state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber');
# 2PC transaction gets aborted
-$node_publisher->safe_psql('postgres', "ROLLBACK PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+ "ROLLBACK PREPARED 'test_prepared_tab';");
$node_publisher->wait_for_catchup($appname);
# check that transaction is aborted on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
-is($result, qq(2|2|2), 'Rows inserted by 2PC are rolled back, leaving only the original 2 rows');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(2|2|2),
+ 'Rows inserted by 2PC are rolled back, leaving only the original 2 rows');
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is aborted on subscriber');
###############################
@@ -151,7 +172,8 @@ is($result, qq(0), 'transaction is aborted on subscriber');
# Note: both publisher and subscriber do crash/restart.
###############################
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
@@ -165,12 +187,16 @@ $node_publisher->start;
$node_subscriber->start;
# commit post the restart
-$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+ "COMMIT PREPARED 'test_prepared_tab';");
$node_publisher->wait_for_catchup($appname);
# check inserts are visible
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
-is($result, qq(3334|3334|3334), 'Rows inserted by 2PC have committed on subscriber, and extra columns contain local defaults');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(3334|3334|3334),
+ 'Rows inserted by 2PC have committed on subscriber, and extra columns contain local defaults'
+);
###############################
# Do INSERT after the PREPARE but before ROLLBACK PREPARED.
@@ -187,7 +213,8 @@ is($result, qq(3334|3334|3334), 'Rows inserted by 2PC have committed on subscrib
$node_publisher->safe_psql('postgres', "DELETE FROM test_tab WHERE a > 2;");
# Then insert, update and delete enough rows to exceed the 64kB limit.
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
@@ -197,24 +224,29 @@ $node_publisher->safe_psql('postgres', q{
$node_publisher->wait_for_catchup($appname);
# check that transaction is in prepared state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber');
# Insert a different record (now we are outside of the 2PC transaction)
# Note: the 2PC transaction still holds row locks so make sure this insert is for a separate primary key
-$node_publisher->safe_psql('postgres', "INSERT INTO test_tab VALUES (99999, 'foobar')");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO test_tab VALUES (99999, 'foobar')");
# 2PC transaction gets aborted
-$node_publisher->safe_psql('postgres', "ROLLBACK PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+ "ROLLBACK PREPARED 'test_prepared_tab';");
$node_publisher->wait_for_catchup($appname);
# check that transaction is aborted on subscriber,
# but the extra INSERT outside of the 2PC was still replicated
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
is($result, qq(3|3|3), 'check the outside insert was copied to subscriber');
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is aborted on subscriber');
###############################
@@ -232,7 +264,8 @@ is($result, qq(0), 'transaction is aborted on subscriber');
$node_publisher->safe_psql('postgres', "DELETE FROM test_tab WHERE a > 2;");
# Then insert, update and delete enough rows to exceed the 64kB limit.
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
@@ -242,24 +275,30 @@ $node_publisher->safe_psql('postgres', q{
$node_publisher->wait_for_catchup($appname);
# check that transaction is in prepared state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber');
# Insert a different record (now we are outside of the 2PC transaction)
# Note: the 2PC transaction still holds row locks so make sure this insert is for a separate primary key
-$node_publisher->safe_psql('postgres', "INSERT INTO test_tab VALUES (99999, 'foobar')");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO test_tab VALUES (99999, 'foobar')");
# 2PC transaction gets committed
-$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+ "COMMIT PREPARED 'test_prepared_tab';");
$node_publisher->wait_for_catchup($appname);
# check that transaction is committed on subscriber
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), count(c), count(d = 999) FROM test_tab");
-is($result, qq(3335|3335|3335), 'Rows inserted by 2PC (as well as outside insert) have committed on subscriber, and extra columns contain local defaults');
+is($result, qq(3335|3335|3335),
+ 'Rows inserted by 2PC (as well as outside insert) have committed on subscriber, and extra columns contain local defaults'
+);
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is committed on subscriber');
###############################
@@ -268,16 +307,21 @@ is($result, qq(0), 'transaction is committed on subscriber');
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription");
is($result, qq(0), 'check subscription was dropped on subscriber');
-$result = $node_publisher->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
+$result = $node_publisher->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_slots");
is($result, qq(0), 'check replication slot was dropped on publisher');
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
-is($result, qq(0), 'check subscription relation status was dropped on subscriber');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription_rel");
+is($result, qq(0),
+ 'check subscription relation status was dropped on subscriber');
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_replication_origin");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_origin");
is($result, qq(0), 'check replication origin was dropped on subscriber');
$node_subscriber->stop('fast');
diff --git a/src/test/subscription/t/024_add_drop_pub.pl b/src/test/subscription/t/024_add_drop_pub.pl
index 561ddde4216..246f8c92372 100644
--- a/src/test/subscription/t/024_add_drop_pub.pl
+++ b/src/test/subscription/t/024_add_drop_pub.pl
@@ -30,8 +30,7 @@ $node_subscriber->safe_psql('postgres', "CREATE TABLE tab_1 (a int)");
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
$node_publisher->safe_psql('postgres',
"CREATE PUBLICATION tap_pub_1 FOR TABLE tab_1");
-$node_publisher->safe_psql('postgres',
- "CREATE PUBLICATION tap_pub_2");
+$node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub_2");
$node_subscriber->safe_psql('postgres',
"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr' PUBLICATION tap_pub_1, tap_pub_2"
diff --git a/src/test/subscription/t/025_rep_changes_for_schema.pl b/src/test/subscription/t/025_rep_changes_for_schema.pl
index 2a6ba5403da..5ce275cf725 100644
--- a/src/test/subscription/t/025_rep_changes_for_schema.pl
+++ b/src/test/subscription/t/025_rep_changes_for_schema.pl
@@ -27,11 +27,14 @@ $node_publisher->safe_psql('postgres',
$node_publisher->safe_psql('postgres',
"CREATE TABLE sch1.tab2 AS SELECT generate_series(1,10) AS a");
$node_publisher->safe_psql('postgres',
- "CREATE TABLE sch1.tab1_parent (a int PRIMARY KEY, b text) PARTITION BY LIST (a)");
+ "CREATE TABLE sch1.tab1_parent (a int PRIMARY KEY, b text) PARTITION BY LIST (a)"
+);
$node_publisher->safe_psql('postgres',
- "CREATE TABLE public.tab1_child1 PARTITION OF sch1.tab1_parent FOR VALUES IN (1, 2, 3)");
+ "CREATE TABLE public.tab1_child1 PARTITION OF sch1.tab1_parent FOR VALUES IN (1, 2, 3)"
+);
$node_publisher->safe_psql('postgres',
- "CREATE TABLE public.tab1_child2 PARTITION OF sch1.tab1_parent FOR VALUES IN (4, 5, 6)");
+ "CREATE TABLE public.tab1_child2 PARTITION OF sch1.tab1_parent FOR VALUES IN (4, 5, 6)"
+);
$node_publisher->safe_psql('postgres',
"INSERT INTO sch1.tab1_parent values (1),(4)");
@@ -41,11 +44,14 @@ $node_subscriber->safe_psql('postgres', "CREATE SCHEMA sch1");
$node_subscriber->safe_psql('postgres', "CREATE TABLE sch1.tab1 (a int)");
$node_subscriber->safe_psql('postgres', "CREATE TABLE sch1.tab2 (a int)");
$node_subscriber->safe_psql('postgres',
- "CREATE TABLE sch1.tab1_parent (a int PRIMARY KEY, b text) PARTITION BY LIST (a)");
+ "CREATE TABLE sch1.tab1_parent (a int PRIMARY KEY, b text) PARTITION BY LIST (a)"
+);
$node_subscriber->safe_psql('postgres',
- "CREATE TABLE public.tab1_child1 PARTITION OF sch1.tab1_parent FOR VALUES IN (1, 2, 3)");
+ "CREATE TABLE public.tab1_child1 PARTITION OF sch1.tab1_parent FOR VALUES IN (1, 2, 3)"
+);
$node_subscriber->safe_psql('postgres',
- "CREATE TABLE public.tab1_child2 PARTITION OF sch1.tab1_parent FOR VALUES IN (4, 5, 6)");
+ "CREATE TABLE public.tab1_child2 PARTITION OF sch1.tab1_parent FOR VALUES IN (4, 5, 6)"
+);
# Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
@@ -75,7 +81,7 @@ is($result, qq(10|1|10), 'check rows on subscriber catchup');
$result = $node_subscriber->safe_psql('postgres',
"SELECT * FROM sch1.tab1_parent order by 1");
-is($result, qq(1|
+is( $result, qq(1|
4|), 'check rows on subscriber catchup');
# Insert some data into few tables and verify that inserted data is replicated
@@ -93,7 +99,7 @@ is($result, qq(20|1|20), 'check replicated inserts on subscriber');
$result = $node_subscriber->safe_psql('postgres',
"SELECT * FROM sch1.tab1_parent order by 1");
-is($result, qq(1|
+is( $result, qq(1|
2|
4|
5|), 'check replicated inserts on subscriber');
@@ -189,7 +195,8 @@ is($result, qq(3),
# Drop schema from publication, verify that the inserts are not published after
# dropping the schema from publication. Here 2nd insert should not be
# published.
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+ 'postgres', "
INSERT INTO sch1.tab1 VALUES(21);
ALTER PUBLICATION tap_pub_schema DROP ALL TABLES IN SCHEMA sch1;
INSERT INTO sch1.tab1 values(22);"
diff --git a/src/test/subscription/t/027_nosuperuser.pl b/src/test/subscription/t/027_nosuperuser.pl
index 4815e6ccffe..350bc40efcb 100644
--- a/src/test/subscription/t/027_nosuperuser.pl
+++ b/src/test/subscription/t/027_nosuperuser.pl
@@ -12,8 +12,9 @@ $offset = 0;
sub publish_insert
{
- my ($tbl, $new_i) = @_;
- $node_publisher->safe_psql('postgres', qq(
+ my ($tbl, $new_i) = @_;
+ $node_publisher->safe_psql(
+ 'postgres', qq(
SET SESSION AUTHORIZATION regress_alice;
INSERT INTO $tbl (i) VALUES ($new_i);
));
@@ -21,8 +22,9 @@ sub publish_insert
sub publish_update
{
- my ($tbl, $old_i, $new_i) = @_;
- $node_publisher->safe_psql('postgres', qq(
+ my ($tbl, $old_i, $new_i) = @_;
+ $node_publisher->safe_psql(
+ 'postgres', qq(
SET SESSION AUTHORIZATION regress_alice;
UPDATE $tbl SET i = $new_i WHERE i = $old_i;
));
@@ -30,8 +32,9 @@ sub publish_update
sub publish_delete
{
- my ($tbl, $old_i) = @_;
- $node_publisher->safe_psql('postgres', qq(
+ my ($tbl, $old_i) = @_;
+ $node_publisher->safe_psql(
+ 'postgres', qq(
SET SESSION AUTHORIZATION regress_alice;
DELETE FROM $tbl WHERE i = $old_i;
));
@@ -39,47 +42,53 @@ sub publish_delete
sub expect_replication
{
- my ($tbl, $cnt, $min, $max, $testname) = @_;
- $node_publisher->wait_for_catchup('admin_sub');
- $result = $node_subscriber->safe_psql('postgres', qq(
+ my ($tbl, $cnt, $min, $max, $testname) = @_;
+ $node_publisher->wait_for_catchup('admin_sub');
+ $result = $node_subscriber->safe_psql(
+ 'postgres', qq(
SELECT COUNT(i), MIN(i), MAX(i) FROM $tbl));
- is ($result, "$cnt|$min|$max", $testname);
+ is($result, "$cnt|$min|$max", $testname);
}
sub expect_failure
{
- my ($tbl, $cnt, $min, $max, $re, $testname) = @_;
- $offset = $node_subscriber->wait_for_log($re, $offset);
- $result = $node_subscriber->safe_psql('postgres', qq(
+ my ($tbl, $cnt, $min, $max, $re, $testname) = @_;
+ $offset = $node_subscriber->wait_for_log($re, $offset);
+ $result = $node_subscriber->safe_psql(
+ 'postgres', qq(
SELECT COUNT(i), MIN(i), MAX(i) FROM $tbl));
- is ($result, "$cnt|$min|$max", $testname);
+ is($result, "$cnt|$min|$max", $testname);
}
sub revoke_superuser
{
- my ($role) = @_;
- $node_subscriber->safe_psql('postgres', qq(
+ my ($role) = @_;
+ $node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER ROLE $role NOSUPERUSER));
}
sub grant_superuser
{
- my ($role) = @_;
- $node_subscriber->safe_psql('postgres', qq(
+ my ($role) = @_;
+ $node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER ROLE $role SUPERUSER));
}
sub revoke_bypassrls
{
- my ($role) = @_;
- $node_subscriber->safe_psql('postgres', qq(
+ my ($role) = @_;
+ $node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER ROLE $role NOBYPASSRLS));
}
sub grant_bypassrls
{
- my ($role) = @_;
- $node_subscriber->safe_psql('postgres', qq(
+ my ($role) = @_;
+ $node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER ROLE $role BYPASSRLS));
}
@@ -88,7 +97,7 @@ sub grant_bypassrls
# "regress_admin". For partitioned tables, layout the partitions differently
# on the publisher than on the subscriber.
#
-$node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher = PostgreSQL::Test::Cluster->new('publisher');
$node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
$node_publisher->init(allows_streaming => 'logical');
$node_subscriber->init;
@@ -96,17 +105,18 @@ $node_publisher->start;
$node_subscriber->start;
$publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
my %remainder_a = (
- publisher => 0,
- subscriber => 1);
+ publisher => 0,
+ subscriber => 1);
my %remainder_b = (
- publisher => 1,
- subscriber => 0);
+ publisher => 1,
+ subscriber => 0);
for my $node ($node_publisher, $node_subscriber)
{
- my $remainder_a = $remainder_a{$node->name};
- my $remainder_b = $remainder_b{$node->name};
- $node->safe_psql('postgres', qq(
+ my $remainder_a = $remainder_a{ $node->name };
+ my $remainder_b = $remainder_b{ $node->name };
+ $node->safe_psql(
+ 'postgres', qq(
CREATE ROLE regress_admin SUPERUSER LOGIN;
CREATE ROLE regress_alice NOSUPERUSER LOGIN;
GRANT CREATE ON DATABASE postgres TO regress_alice;
@@ -129,14 +139,16 @@ for my $node ($node_publisher, $node_subscriber)
ALTER TABLE alice.hashpart_b REPLICA IDENTITY FULL;
));
}
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
SET SESSION AUTHORIZATION regress_alice;
CREATE PUBLICATION alice
FOR TABLE alice.unpartitioned, alice.hashpart
WITH (publish_via_partition_root = true);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
SET SESSION AUTHORIZATION regress_admin;
CREATE SUBSCRIPTION admin_sub CONNECTION '$publisher_connstr' PUBLICATION alice;
));
@@ -156,9 +168,8 @@ publish_insert("alice.unpartitioned", 3);
publish_insert("alice.unpartitioned", 5);
publish_update("alice.unpartitioned", 1 => 7);
publish_delete("alice.unpartitioned", 3);
-expect_replication(
- "alice.unpartitioned", 2, 5, 7,
- "superuser admin replicates into unpartitioned");
+expect_replication("alice.unpartitioned", 2, 5, 7,
+ "superuser admin replicates into unpartitioned");
# Revoke and restore superuser privilege for "regress_admin",
# verifying that replication fails while superuser privilege is
@@ -166,12 +177,13 @@ expect_replication(
#
revoke_superuser("regress_admin");
publish_update("alice.unpartitioned", 5 => 9);
-expect_failure("alice.unpartitioned", 2, 5, 7,
- qr/ERROR: permission denied for table unpartitioned/msi,
- "non-superuser admin fails to replicate update");
+expect_failure(
+ "alice.unpartitioned", 2, 5, 7,
+ qr/ERROR: permission denied for table unpartitioned/msi,
+ "non-superuser admin fails to replicate update");
grant_superuser("regress_admin");
expect_replication("alice.unpartitioned", 2, 7, 9,
- "admin with restored superuser privilege replicates update");
+ "admin with restored superuser privilege replicates update");
# Grant INSERT, UPDATE, DELETE privileges on the target tables to
# "regress_admin" so that superuser privileges are not necessary for
@@ -180,7 +192,8 @@ expect_replication("alice.unpartitioned", 2, 7, 9,
# Note that UPDATE and DELETE also require SELECT privileges, which
# will be granted in subsequent test.
#
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER ROLE regress_admin NOSUPERUSER;
SET SESSION AUTHORIZATION regress_alice;
GRANT INSERT,UPDATE,DELETE ON
@@ -192,16 +205,23 @@ REVOKE SELECT ON alice.unpartitioned FROM regress_admin;
publish_insert("alice.unpartitioned", 11);
expect_replication("alice.unpartitioned", 3, 7, 11,
- "nosuperuser admin with INSERT privileges can replicate into unpartitioned");
+ "nosuperuser admin with INSERT privileges can replicate into unpartitioned"
+);
publish_update("alice.unpartitioned", 7 => 13);
-expect_failure("alice.unpartitioned", 3, 7, 11,
- qr/ERROR: permission denied for table unpartitioned/msi,
- "non-superuser admin without SELECT privileges fails to replicate update");
+expect_failure(
+ "alice.unpartitioned",
+ 3,
+ 7,
+ 11,
+ qr/ERROR: permission denied for table unpartitioned/msi,
+ "non-superuser admin without SELECT privileges fails to replicate update"
+);
# Now grant SELECT
#
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
SET SESSION AUTHORIZATION regress_alice;
GRANT SELECT ON
alice.unpartitioned,
@@ -211,7 +231,8 @@ GRANT SELECT ON
publish_delete("alice.unpartitioned", 9);
expect_replication("alice.unpartitioned", 2, 11, 13,
- "nosuperuser admin with all table privileges can replicate into unpartitioned");
+ "nosuperuser admin with all table privileges can replicate into unpartitioned"
+);
# Test partitioning
#
@@ -221,50 +242,68 @@ publish_insert("alice.hashpart", 103);
publish_update("alice.hashpart", 102 => 120);
publish_delete("alice.hashpart", 101);
expect_replication("alice.hashpart", 2, 103, 120,
- "nosuperuser admin with all table privileges can replicate into hashpart");
+ "nosuperuser admin with all table privileges can replicate into hashpart"
+);
# Enable RLS on the target table and check that "regress_admin" can
# only replicate into it when superuser or bypassrls.
#
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
SET SESSION AUTHORIZATION regress_alice;
ALTER TABLE alice.unpartitioned ENABLE ROW LEVEL SECURITY;
));
revoke_superuser("regress_admin");
publish_insert("alice.unpartitioned", 15);
-expect_failure("alice.unpartitioned", 2, 11, 13,
- qr/ERROR: "regress_admin" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi,
- "non-superuser admin fails to replicate insert into rls enabled table");
+expect_failure(
+ "alice.unpartitioned",
+ 2,
+ 11,
+ 13,
+ qr/ERROR: "regress_admin" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi,
+ "non-superuser admin fails to replicate insert into rls enabled table");
grant_superuser("regress_admin");
expect_replication("alice.unpartitioned", 3, 11, 15,
- "admin with restored superuser privilege replicates insert into rls enabled unpartitioned");
+ "admin with restored superuser privilege replicates insert into rls enabled unpartitioned"
+);
revoke_superuser("regress_admin");
publish_update("alice.unpartitioned", 11 => 17);
-expect_failure("alice.unpartitioned", 3, 11, 15,
- qr/ERROR: "regress_admin" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi,
- "non-superuser admin fails to replicate update into rls enabled unpartitioned");
+expect_failure(
+ "alice.unpartitioned",
+ 3,
+ 11,
+ 15,
+ qr/ERROR: "regress_admin" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi,
+ "non-superuser admin fails to replicate update into rls enabled unpartitioned"
+);
grant_bypassrls("regress_admin");
expect_replication("alice.unpartitioned", 3, 13, 17,
- "admin with bypassrls replicates update into rls enabled unpartitioned");
+ "admin with bypassrls replicates update into rls enabled unpartitioned");
revoke_bypassrls("regress_admin");
publish_delete("alice.unpartitioned", 13);
-expect_failure("alice.unpartitioned", 3, 13, 17,
- qr/ERROR: "regress_admin" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi,
- "non-superuser admin without bypassrls fails to replicate delete into rls enabled unpartitioned");
+expect_failure(
+ "alice.unpartitioned",
+ 3,
+ 13,
+ 17,
+ qr/ERROR: "regress_admin" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi,
+ "non-superuser admin without bypassrls fails to replicate delete into rls enabled unpartitioned"
+);
grant_bypassrls("regress_admin");
expect_replication("alice.unpartitioned", 2, 15, 17,
- "admin with bypassrls replicates delete into rls enabled unpartitioned");
+ "admin with bypassrls replicates delete into rls enabled unpartitioned");
grant_superuser("regress_admin");
# Alter the subscription owner to "regress_alice". She has neither superuser
# nor bypassrls, but as the table owner should be able to replicate.
#
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER SUBSCRIPTION admin_sub DISABLE;
ALTER ROLE regress_alice SUPERUSER;
ALTER SUBSCRIPTION admin_sub OWNER TO regress_alice;
@@ -275,8 +314,8 @@ ALTER SUBSCRIPTION admin_sub ENABLE;
publish_insert("alice.unpartitioned", 23);
publish_update("alice.unpartitioned", 15 => 25);
publish_delete("alice.unpartitioned", 17);
-expect_replication(
- "alice.unpartitioned", 2, 23, 25,
- "nosuperuser nobypassrls table owner can replicate delete into unpartitioned despite rls");
+expect_replication("alice.unpartitioned", 2, 23, 25,
+ "nosuperuser nobypassrls table owner can replicate delete into unpartitioned despite rls"
+);
done_testing();
diff --git a/src/test/subscription/t/028_row_filter.pl b/src/test/subscription/t/028_row_filter.pl
index 82c4eb6ef62..0dc0a6d10f5 100644
--- a/src/test/subscription/t/028_row_filter.pl
+++ b/src/test/subscription/t/028_row_filter.pl
@@ -291,8 +291,7 @@ $node_subscriber->safe_psql('postgres',
$node_subscriber->safe_psql('postgres',
"CREATE TABLE tab_rowfilter_viaroot_part (a int)");
$node_subscriber->safe_psql('postgres',
- "CREATE TABLE tab_rowfilter_viaroot_part_1 (a int)"
-);
+ "CREATE TABLE tab_rowfilter_viaroot_part_1 (a int)");
# setup logical replication
$node_publisher->safe_psql('postgres',
@@ -720,18 +719,14 @@ is($result, qq(t|1), 'check replicated rows to tab_rowfilter_toast');
$result =
$node_subscriber->safe_psql('postgres',
"SELECT a FROM tab_rowfilter_viaroot_part");
-is( $result, qq(16),
- 'check replicated rows to tab_rowfilter_viaroot_part'
-);
+is($result, qq(16), 'check replicated rows to tab_rowfilter_viaroot_part');
# Check there is no data in tab_rowfilter_viaroot_part_1 because rows are
# replicated via the top most parent table tab_rowfilter_viaroot_part
$result =
$node_subscriber->safe_psql('postgres',
"SELECT a FROM tab_rowfilter_viaroot_part_1");
-is( $result, qq(),
- 'check replicated rows to tab_rowfilter_viaroot_part_1'
-);
+is($result, qq(), 'check replicated rows to tab_rowfilter_viaroot_part_1');
# Testcase end: FOR TABLE with row filter publications
# ======================================================
diff --git a/src/test/subscription/t/031_column_list.pl b/src/test/subscription/t/031_column_list.pl
index bdcf3e4a248..19812e11f31 100644
--- a/src/test/subscription/t/031_column_list.pl
+++ b/src/test/subscription/t/031_column_list.pl
@@ -26,51 +26,60 @@ sub wait_for_subscription_sync
my ($node) = @_;
# Also wait for initial table sync to finish
- my $synced_query = "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
+ my $synced_query =
+ "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
$node->poll_query_until('postgres', $synced_query)
- or die "Timed out while waiting for subscriber to synchronize data";
+ or die "Timed out while waiting for subscriber to synchronize data";
}
# setup tables on both nodes
# tab1: simple 1:1 replication
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab1 (a int PRIMARY KEY, "B" int, c int)
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab1 (a int PRIMARY KEY, "B" int, c int)
));
# tab2: replication from regular to table with fewer columns
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab2 (a int PRIMARY KEY, b varchar, c int);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab2 (a int PRIMARY KEY, b varchar)
));
# tab3: simple 1:1 replication with weird column names
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab3 ("a'" int PRIMARY KEY, "B" varchar, "c'" int)
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab3 ("a'" int PRIMARY KEY, "c'" int)
));
# test_part: partitioned tables, with partitioning (including multi-level
# partitioning, and fewer columns on the subscriber)
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_part (a int PRIMARY KEY, b text, c timestamptz) PARTITION BY LIST (a);
CREATE TABLE test_part_1_1 PARTITION OF test_part FOR VALUES IN (1,2,3,4,5,6);
CREATE TABLE test_part_2_1 PARTITION OF test_part FOR VALUES IN (7,8,9,10,11,12) PARTITION BY LIST (a);
CREATE TABLE test_part_2_2 PARTITION OF test_part_2_1 FOR VALUES IN (7,8,9,10);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_part (a int PRIMARY KEY, b text) PARTITION BY LIST (a);
CREATE TABLE test_part_1_1 PARTITION OF test_part FOR VALUES IN (1,2,3,4,5,6);
CREATE TABLE test_part_2_1 PARTITION OF test_part FOR VALUES IN (7,8,9,10,11,12) PARTITION BY LIST (a);
@@ -78,12 +87,14 @@ $node_subscriber->safe_psql('postgres', qq(
));
# tab4: table with user-defined enum types
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TYPE test_typ AS ENUM ('blue', 'red');
CREATE TABLE tab4 (a INT PRIMARY KEY, b test_typ, c int, d text);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TYPE test_typ AS ENUM ('blue', 'red');
CREATE TABLE tab4 (a INT PRIMARY KEY, b test_typ, d text);
));
@@ -91,7 +102,8 @@ $node_subscriber->safe_psql('postgres', qq(
# TEST: create publication and subscription for some of the tables with
# column lists
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE PUBLICATION pub1
FOR TABLE tab1 (a, "B"), tab3 ("a'", "c'"), test_part (a, b), tab4 (a, b, d)
WITH (publish_via_partition_root = 'true');
@@ -99,36 +111,41 @@ $node_publisher->safe_psql('postgres', qq(
# check that we got the right prattrs values for the publication in the
# pg_publication_rel catalog (order by relname, to get stable ordering)
-my $result = $node_publisher->safe_psql('postgres', qq(
+my $result = $node_publisher->safe_psql(
+ 'postgres', qq(
SELECT relname, prattrs
FROM pg_publication_rel pb JOIN pg_class pc ON(pb.prrelid = pc.oid)
ORDER BY relname
));
-is($result, qq(tab1|1 2
+is( $result, qq(tab1|1 2
tab3|1 3
tab4|1 2 4
test_part|1 2), 'publication relation updated');
# TEST: insert data into the tables, create subscription and see if sync
# replicates the right columns
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab1 VALUES (1, 2, 3);
INSERT INTO tab1 VALUES (4, 5, 6);
));
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab3 VALUES (1, 2, 3);
INSERT INTO tab3 VALUES (4, 5, 6);
));
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab4 VALUES (1, 'red', 3, 'oh my');
INSERT INTO tab4 VALUES (2, 'blue', 4, 'hello');
));
# replication of partitioned table
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO test_part VALUES (1, 'abc', '2021-07-04 12:00:00');
INSERT INTO test_part VALUES (2, 'bcd', '2021-07-03 11:12:13');
INSERT INTO test_part VALUES (7, 'abc', '2021-07-04 12:00:00');
@@ -137,34 +154,35 @@ $node_publisher->safe_psql('postgres', qq(
# create subscription for the publication, wait for sync to complete,
# then check the sync results
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub1
));
wait_for_subscription_sync($node_subscriber);
# tab1: only (a,b) is replicated
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT * FROM tab1 ORDER BY a");
-is($result, qq(1|2|
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT * FROM tab1 ORDER BY a");
+is( $result, qq(1|2|
4|5|), 'insert on column tab1.c is not replicated');
# tab3: only (a,c) is replicated
$result = $node_subscriber->safe_psql('postgres',
qq(SELECT * FROM tab3 ORDER BY "a'"));
-is($result, qq(1|3
+is( $result, qq(1|3
4|6), 'insert on column tab3.b is not replicated');
# tab4: only (a,b,d) is replicated
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT * FROM tab4 ORDER BY a");
-is($result, qq(1|red|oh my
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT * FROM tab4 ORDER BY a");
+is( $result, qq(1|red|oh my
2|blue|hello), 'insert on column tab4.c is not replicated');
# test_part: (a,b) is replicated
$result = $node_subscriber->safe_psql('postgres',
"SELECT * FROM test_part ORDER BY a");
-is($result, qq(1|abc
+is( $result, qq(1|abc
2|bcd
7|abc
8|bcd), 'insert on column test_part.c columns is not replicated');
@@ -173,23 +191,27 @@ is($result, qq(1|abc
# TEST: now insert more data into the tables, and wait until we replicate
# them (not by tablesync, but regular decoding and replication)
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab1 VALUES (2, 3, 4);
INSERT INTO tab1 VALUES (5, 6, 7);
));
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab3 VALUES (2, 3, 4);
INSERT INTO tab3 VALUES (5, 6, 7);
));
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab4 VALUES (3, 'red', 5, 'foo');
INSERT INTO tab4 VALUES (4, 'blue', 6, 'bar');
));
# replication of partitioned table
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO test_part VALUES (3, 'xxx', '2022-02-01 10:00:00');
INSERT INTO test_part VALUES (4, 'yyy', '2022-03-02 15:12:13');
INSERT INTO test_part VALUES (9, 'zzz', '2022-04-03 21:00:00');
@@ -200,9 +222,9 @@ $node_publisher->safe_psql('postgres', qq(
$node_publisher->wait_for_catchup('sub1');
# tab1: only (a,b) is replicated
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT * FROM tab1 ORDER BY a");
-is($result, qq(1|2|
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT * FROM tab1 ORDER BY a");
+is( $result, qq(1|2|
2|3|
4|5|
5|6|), 'insert on column tab1.c is not replicated');
@@ -210,15 +232,15 @@ is($result, qq(1|2|
# tab3: only (a,c) is replicated
$result = $node_subscriber->safe_psql('postgres',
qq(SELECT * FROM tab3 ORDER BY "a'"));
-is($result, qq(1|3
+is( $result, qq(1|3
2|4
4|6
5|7), 'insert on column tab3.b is not replicated');
# tab4: only (a,b,d) is replicated
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT * FROM tab4 ORDER BY a");
-is($result, qq(1|red|oh my
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT * FROM tab4 ORDER BY a");
+is( $result, qq(1|red|oh my
2|blue|hello
3|red|foo
4|blue|bar), 'insert on column tab4.c is not replicated');
@@ -226,7 +248,7 @@ is($result, qq(1|red|oh my
# test_part: (a,b) is replicated
$result = $node_subscriber->safe_psql('postgres',
"SELECT * FROM test_part ORDER BY a");
-is($result, qq(1|abc
+is( $result, qq(1|abc
2|bcd
3|xxx
4|yyy
@@ -257,36 +279,38 @@ $node_publisher->safe_psql('postgres',
# tab4
$node_publisher->safe_psql('postgres',
- qq(UPDATE tab4 SET b = 'blue', c = c * 2, d = d || ' updated' where a = 1));
+ qq(UPDATE tab4 SET b = 'blue', c = c * 2, d = d || ' updated' where a = 1)
+);
# tab4
$node_publisher->safe_psql('postgres',
- qq(UPDATE tab4 SET b = 'red', c = c * 2, d = d || ' updated' where a = 2));
+ qq(UPDATE tab4 SET b = 'red', c = c * 2, d = d || ' updated' where a = 2)
+);
# wait for the replication to catch up, and check the UPDATE results got
# replicated correctly, with the right column list
$node_publisher->wait_for_catchup('sub1');
-$result = $node_subscriber->safe_psql('postgres',
- qq(SELECT * FROM tab1 ORDER BY a));
-is($result,
-qq(1|4|
+$result =
+ $node_subscriber->safe_psql('postgres', qq(SELECT * FROM tab1 ORDER BY a));
+is( $result,
+ qq(1|4|
2|3|
4|5|
5|6|), 'only update on column tab1.b is replicated');
$result = $node_subscriber->safe_psql('postgres',
qq(SELECT * FROM tab3 ORDER BY "a'"));
-is($result,
-qq(1|6
+is( $result,
+ qq(1|6
2|4
4|6
5|7), 'only update on column tab3.c is replicated');
-$result = $node_subscriber->safe_psql('postgres',
- qq(SELECT * FROM tab4 ORDER BY a));
+$result =
+ $node_subscriber->safe_psql('postgres', qq(SELECT * FROM tab4 ORDER BY a));
-is($result, qq(1|blue|oh my updated
+is( $result, qq(1|blue|oh my updated
2|red|hello updated
3|red|foo
4|blue|bar), 'update on column tab4.c is not replicated');
@@ -295,7 +319,8 @@ is($result, qq(1|blue|oh my updated
# TEST: add table with a column list, insert data, replicate
# insert some data before adding it to the publication
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab2 VALUES (1, 'abc', 3);
));
@@ -309,34 +334,37 @@ $node_subscriber->safe_psql('postgres',
# the results of the replication
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab2 VALUES (2, 'def', 6);
));
$node_publisher->wait_for_catchup('sub1');
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT * FROM tab2 ORDER BY a");
-is($result, qq(1|abc
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT * FROM tab2 ORDER BY a");
+is( $result, qq(1|abc
2|def), 'insert on column tab2.c is not replicated');
# do a couple updates, check the correct stuff gets replicated
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
UPDATE tab2 SET c = 5 where a = 1;
UPDATE tab2 SET b = 'xyz' where a = 2;
));
$node_publisher->wait_for_catchup('sub1');
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT * FROM tab2 ORDER BY a");
-is($result, qq(1|abc
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT * FROM tab2 ORDER BY a");
+is( $result, qq(1|abc
2|xyz), 'update on column tab2.c is not replicated');
# TEST: add a table to two publications with different column lists, and
# create a single subscription replicating both publications
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab5 (a int PRIMARY KEY, b int, c int, d int);
CREATE PUBLICATION pub2 FOR TABLE tab5 (a, b);
CREATE PUBLICATION pub3 FOR TABLE tab5 (a, d);
@@ -346,11 +374,13 @@ $node_publisher->safe_psql('postgres', qq(
INSERT INTO tab5 VALUES (2, 22, 222, 2222);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab5 (a int PRIMARY KEY, b int, d int);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER SUBSCRIPTION sub1 SET PUBLICATION pub2, pub3
));
@@ -360,52 +390,57 @@ $node_publisher->wait_for_catchup('sub1');
# insert data and make sure all the columns (union of the columns lists)
# get fully replicated
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab5 VALUES (3, 33, 333, 3333);
INSERT INTO tab5 VALUES (4, 44, 444, 4444);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab5 ORDER BY a"),
- qq(1|11|1111
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab5 ORDER BY a"),
+ qq(1|11|1111
2|22|2222
3|33|3333
4|44|4444),
- 'overlapping publications with overlapping column lists');
+ 'overlapping publications with overlapping column lists');
# and finally, remove the column list for one of the publications, which
# means replicating all columns (removing the column list), but first add
# the missing column to the table on subscriber
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
ALTER PUBLICATION pub3 SET TABLE tab5;
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
ALTER TABLE tab5 ADD COLUMN c INT;
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab5 VALUES (5, 55, 555, 5555);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab5 ORDER BY a"),
- qq(1|11|1111|
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab5 ORDER BY a"),
+ qq(1|11|1111|
2|22|2222|
3|33|3333|
4|44|4444|
5|55|5555|555),
- 'overlapping publications with overlapping column lists');
+ 'overlapping publications with overlapping column lists');
# TEST: create a table with a column list, then change the replica
# identity by replacing a primary key (but use a different column in
# the column list)
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab6 (a int PRIMARY KEY, b int, c int, d int);
CREATE PUBLICATION pub4 FOR TABLE tab6 (a, b);
@@ -413,31 +448,35 @@ $node_publisher->safe_psql('postgres', qq(
INSERT INTO tab6 VALUES (1, 22, 333, 4444);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab6 (a int PRIMARY KEY, b int, c int, d int);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER SUBSCRIPTION sub1 SET PUBLICATION pub4
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab6 VALUES (2, 33, 444, 5555);
UPDATE tab6 SET b = b * 2, c = c * 3, d = d * 4;
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab6 ORDER BY a"),
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab6 ORDER BY a"),
qq(1|44||
2|66||), 'replication with the original primary key');
# now redefine the constraint - move the primary key to a different column
# (which is still covered by the column list, though)
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
ALTER TABLE tab6 DROP CONSTRAINT tab6_pkey;
ALTER TABLE tab6 ADD PRIMARY KEY (b);
));
@@ -445,35 +484,39 @@ $node_publisher->safe_psql('postgres', qq(
# we need to do the same thing on the subscriber
# XXX What would happen if this happens before the publisher ALTER? Or
# interleaved, somehow? But that seems unrelated to column lists.
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER TABLE tab6 DROP CONSTRAINT tab6_pkey;
ALTER TABLE tab6 ADD PRIMARY KEY (b);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab6 VALUES (3, 55, 666, 8888);
UPDATE tab6 SET b = b * 2, c = c * 3, d = d * 4;
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab6 ORDER BY a"),
- qq(1|88||
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab6 ORDER BY a"),
+ qq(1|88||
2|132||
3|110||),
- 'replication with the modified primary key');
+ 'replication with the modified primary key');
# TEST: create a table with a column list, then change the replica
# identity by replacing a primary key with a key on multiple columns
# (all of them covered by the column list)
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab7 (a int PRIMARY KEY, b int, c int, d int);
CREATE PUBLICATION pub5 FOR TABLE tab7 (a, b);
@@ -481,52 +524,58 @@ $node_publisher->safe_psql('postgres', qq(
INSERT INTO tab7 VALUES (1, 22, 333, 4444);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab7 (a int PRIMARY KEY, b int, c int, d int);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER SUBSCRIPTION sub1 SET PUBLICATION pub5
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab7 VALUES (2, 33, 444, 5555);
UPDATE tab7 SET b = b * 2, c = c * 3, d = d * 4;
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab7 ORDER BY a"),
- qq(1|44||
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab7 ORDER BY a"),
+ qq(1|44||
2|66||), 'replication with the original primary key');
# now redefine the constraint - move the primary key to a different column
# (which is not covered by the column list)
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
ALTER TABLE tab7 DROP CONSTRAINT tab7_pkey;
ALTER TABLE tab7 ADD PRIMARY KEY (a, b);
));
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab7 VALUES (3, 55, 666, 7777);
UPDATE tab7 SET b = b * 2, c = c * 3, d = d * 4;
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab7 ORDER BY a"),
- qq(1|88||
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab7 ORDER BY a"),
+ qq(1|88||
2|132||
3|110||),
- 'replication with the modified primary key');
+ 'replication with the modified primary key');
# now switch the primary key again to another columns not covered by the
# column list, but also generate writes between the drop and creation
# of the new constraint
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
ALTER TABLE tab7 DROP CONSTRAINT tab7_pkey;
INSERT INTO tab7 VALUES (4, 77, 888, 9999);
-- update/delete is not allowed for tables without RI
@@ -535,16 +584,17 @@ $node_publisher->safe_psql('postgres', qq(
DELETE FROM tab7 WHERE a = 1;
));
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab7 ORDER BY a"),
- qq(2|264||
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab7 ORDER BY a"),
+ qq(2|264||
3|220||
4|154||),
- 'replication with the modified primary key');
+ 'replication with the modified primary key');
# TEST: partitioned tables (with publish_via_partition_root = false)
@@ -555,7 +605,8 @@ is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab7 ORDER BY a"),
# First, let's create a partitioned table with two partitions, each with
# a different RI, but a column list not covering all those RI.
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_part_a (a int, b int, c int) PARTITION BY LIST (a);
CREATE TABLE test_part_a_1 PARTITION OF test_part_a FOR VALUES IN (1,2,3,4,5);
@@ -572,7 +623,8 @@ $node_publisher->safe_psql('postgres', qq(
));
# do the same thing on the subscriber (with the opposite column order)
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_part_a (b int, a int) PARTITION BY LIST (a);
CREATE TABLE test_part_a_1 PARTITION OF test_part_a FOR VALUES IN (1,2,3,4,5);
@@ -586,38 +638,43 @@ $node_subscriber->safe_psql('postgres', qq(
# create a publication replicating just the column "a", which is not enough
# for the second partition
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE PUBLICATION pub6 FOR TABLE test_part_a (b, a) WITH (publish_via_partition_root = true);
ALTER PUBLICATION pub6 ADD TABLE test_part_a_1 (a);
ALTER PUBLICATION pub6 ADD TABLE test_part_a_2 (b);
));
# add the publication to our subscription, wait for sync to complete
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER SUBSCRIPTION sub1 SET PUBLICATION pub6
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO test_part_a VALUES (2, 5);
INSERT INTO test_part_a VALUES (7, 6);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT a, b FROM test_part_a ORDER BY a, b"),
- qq(1|3
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT a, b FROM test_part_a ORDER BY a, b"),
+ qq(1|3
2|5
6|4
7|6),
- 'partitions with different replica identities not replicated correctly');
+ 'partitions with different replica identities not replicated correctly');
# This time start with a column list covering RI for all partitions, but
# then update the column list to not cover column "b" (needed by the
# second partition)
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_part_b (a int, b int) PARTITION BY LIST (a);
CREATE TABLE test_part_b_1 PARTITION OF test_part_b FOR VALUES IN (1,2,3,4,5);
@@ -634,7 +691,8 @@ $node_publisher->safe_psql('postgres', qq(
));
# do the same thing on the subscriber
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_part_b (a int, b int) PARTITION BY LIST (a);
CREATE TABLE test_part_b_1 PARTITION OF test_part_b FOR VALUES IN (1,2,3,4,5);
@@ -648,37 +706,42 @@ $node_subscriber->safe_psql('postgres', qq(
# create a publication replicating both columns, which is sufficient for
# both partitions
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE PUBLICATION pub7 FOR TABLE test_part_b (a, b) WITH (publish_via_partition_root = true);
));
# add the publication to our subscription, wait for sync to complete
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER SUBSCRIPTION sub1 SET PUBLICATION pub7
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO test_part_b VALUES (2, 3);
INSERT INTO test_part_b VALUES (7, 4);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_part_b ORDER BY a, b"),
- qq(1|1
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM test_part_b ORDER BY a, b"),
+ qq(1|1
2|3
6|2
7|4),
- 'partitions with different replica identities not replicated correctly');
+ 'partitions with different replica identities not replicated correctly');
# TEST: This time start with a column list covering RI for all partitions,
# but then update RI for one of the partitions to not be covered by the
# column list anymore.
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_part_c (a int, b int, c int) PARTITION BY LIST (a);
CREATE TABLE test_part_c_1 PARTITION OF test_part_c FOR VALUES IN (1,3);
@@ -695,7 +758,8 @@ $node_publisher->safe_psql('postgres', qq(
));
# do the same thing on the subscriber
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_part_c (a int, b int, c int) PARTITION BY LIST (a);
CREATE TABLE test_part_c_1 PARTITION OF test_part_c FOR VALUES IN (1,3);
@@ -710,39 +774,44 @@ $node_subscriber->safe_psql('postgres', qq(
# create a publication replicating data through partition root, with a column
# list on the root, and then add the partitions one by one with separate
# column lists (but those are not applied)
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE PUBLICATION pub8 FOR TABLE test_part_c WITH (publish_via_partition_root = false);
ALTER PUBLICATION pub8 ADD TABLE test_part_c_1 (a,c);
ALTER PUBLICATION pub8 ADD TABLE test_part_c_2 (a,b);
));
# add the publication to our subscription, wait for sync to complete
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
DROP SUBSCRIPTION sub1;
CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub8;
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO test_part_c VALUES (3, 7, 8);
INSERT INTO test_part_c VALUES (4, 9, 10);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_part_c ORDER BY a, b"),
- qq(1||5
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM test_part_c ORDER BY a, b"),
+ qq(1||5
2|4|
3||8
4|9|),
- 'partitions with different replica identities not replicated correctly');
+ 'partitions with different replica identities not replicated correctly');
# create a publication not replicating data through partition root, without
# a column list on the root, and then add the partitions one by one with
# separate column lists
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
DROP PUBLICATION pub8;
CREATE PUBLICATION pub8 FOR TABLE test_part_c WITH (publish_via_partition_root = false);
ALTER PUBLICATION pub8 ADD TABLE test_part_c_1 (a);
@@ -750,14 +819,16 @@ $node_publisher->safe_psql('postgres', qq(
));
# add the publication to our subscription, wait for sync to complete
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
TRUNCATE test_part_c;
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
TRUNCATE test_part_c;
INSERT INTO test_part_c VALUES (1, 3, 5);
INSERT INTO test_part_c VALUES (2, 4, 6);
@@ -765,16 +836,18 @@ $node_publisher->safe_psql('postgres', qq(
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_part_c ORDER BY a, b"),
- qq(1||
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM test_part_c ORDER BY a, b"),
+ qq(1||
2|4|),
- 'partitions with different replica identities not replicated correctly');
+ 'partitions with different replica identities not replicated correctly');
# TEST: Start with a single partition, with RI compatible with the column
# list, and then attach a partition with incompatible RI.
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_part_d (a int, b int) PARTITION BY LIST (a);
CREATE TABLE test_part_d_1 PARTITION OF test_part_d FOR VALUES IN (1,3);
@@ -786,7 +859,8 @@ $node_publisher->safe_psql('postgres', qq(
# do the same thing on the subscriber (in fact, create both partitions right
# away, no need to delay that)
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_part_d (a int, b int) PARTITION BY LIST (a);
CREATE TABLE test_part_d_1 PARTITION OF test_part_d FOR VALUES IN (1,3);
@@ -800,33 +874,38 @@ $node_subscriber->safe_psql('postgres', qq(
# create a publication replicating both columns, which is sufficient for
# both partitions
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE PUBLICATION pub9 FOR TABLE test_part_d (a) WITH (publish_via_partition_root = true);
));
# add the publication to our subscription, wait for sync to complete
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER SUBSCRIPTION sub1 SET PUBLICATION pub9
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO test_part_d VALUES (3, 4);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_part_d ORDER BY a, b"),
- qq(1|
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM test_part_d ORDER BY a, b"),
+ qq(1|
3|),
- 'partitions with different replica identities not replicated correctly');
+ 'partitions with different replica identities not replicated correctly');
# TEST: With a table included in multiple publications, we should use a
# union of the column lists. So with column lists (a,b) and (a,c) we
# should replicate (a,b,c).
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_mix_1 (a int PRIMARY KEY, b int, c int);
CREATE PUBLICATION pub_mix_1 FOR TABLE test_mix_1 (a, b);
CREATE PUBLICATION pub_mix_2 FOR TABLE test_mix_1 (a, c);
@@ -835,23 +914,26 @@ $node_publisher->safe_psql('postgres', qq(
INSERT INTO test_mix_1 VALUES (1, 2, 3);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_mix_1 (a int PRIMARY KEY, b int, c int);
ALTER SUBSCRIPTION sub1 SET PUBLICATION pub_mix_1, pub_mix_2;
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO test_mix_1 VALUES (4, 5, 6);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_mix_1 ORDER BY a"),
- qq(1|2|3
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM test_mix_1 ORDER BY a"),
+ qq(1|2|3
4|5|6),
- 'a mix of publications should use a union of column list');
+ 'a mix of publications should use a union of column list');
# TEST: With a table included in multiple publications, we should use a
@@ -859,12 +941,14 @@ is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_mix_1 ORDER BY a")
# TABLES, we should replicate all columns.
# drop unnecessary tables, so as not to interfere with the FOR ALL TABLES
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
DROP TABLE tab1, tab2, tab3, tab4, tab5, tab6, tab7, test_mix_1,
test_part, test_part_a, test_part_b, test_part_c, test_part_d;
));
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_mix_2 (a int PRIMARY KEY, b int, c int);
CREATE PUBLICATION pub_mix_3 FOR TABLE test_mix_2 (a, b);
CREATE PUBLICATION pub_mix_4 FOR ALL TABLES;
@@ -873,7 +957,8 @@ $node_publisher->safe_psql('postgres', qq(
INSERT INTO test_mix_2 VALUES (1, 2, 3);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_mix_2 (a int PRIMARY KEY, b int, c int);
ALTER SUBSCRIPTION sub1 SET PUBLICATION pub_mix_3, pub_mix_4;
ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
@@ -881,28 +966,31 @@ $node_subscriber->safe_psql('postgres', qq(
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO test_mix_2 VALUES (4, 5, 6);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_mix_2"),
- qq(1|2|3
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM test_mix_2"),
+ qq(1|2|3
4|5|6),
- 'a mix of publications should use a union of column list');
+ 'a mix of publications should use a union of column list');
# TEST: With a table included in multiple publications, we should use a
# union of the column lists. If any of the publications is FOR ALL
# TABLES IN SCHEMA, we should replicate all columns.
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
DROP SUBSCRIPTION sub1;
CREATE TABLE test_mix_3 (a int PRIMARY KEY, b int, c int);
));
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
DROP TABLE test_mix_2;
CREATE TABLE test_mix_3 (a int PRIMARY KEY, b int, c int);
CREATE PUBLICATION pub_mix_5 FOR TABLE test_mix_3 (a, b);
@@ -912,22 +1000,24 @@ $node_publisher->safe_psql('postgres', qq(
INSERT INTO test_mix_3 VALUES (1, 2, 3);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub_mix_5, pub_mix_6;
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO test_mix_3 VALUES (4, 5, 6);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_mix_3"),
- qq(1|2|3
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM test_mix_3"),
+ qq(1|2|3
4|5|6),
- 'a mix of publications should use a union of column list');
+ 'a mix of publications should use a union of column list');
# TEST: Check handling of publish_via_partition_root - if a partition is
@@ -935,7 +1025,8 @@ is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_mix_3"),
# defined for the whole table (not the partitions) - both during the initial
# sync and when replicating changes. This is what we do for row filters.
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
DROP SUBSCRIPTION sub1;
CREATE TABLE test_root (a int PRIMARY KEY, b int, c int) PARTITION BY RANGE (a);
@@ -943,7 +1034,8 @@ $node_subscriber->safe_psql('postgres', qq(
CREATE TABLE test_root_2 PARTITION OF test_root FOR VALUES FROM (10) TO (20);
));
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_root (a int PRIMARY KEY, b int, c int) PARTITION BY RANGE (a);
CREATE TABLE test_root_1 PARTITION OF test_root FOR VALUES FROM (1) TO (10);
CREATE TABLE test_root_2 PARTITION OF test_root FOR VALUES FROM (10) TO (20);
@@ -955,25 +1047,28 @@ $node_publisher->safe_psql('postgres', qq(
INSERT INTO test_root VALUES (10, 20, 30);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub_root_true;
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO test_root VALUES (2, 3, 4);
INSERT INTO test_root VALUES (11, 21, 31);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_root ORDER BY a, b, c"),
- qq(1||
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM test_root ORDER BY a, b, c"),
+ qq(1||
2||
10||
11||),
- 'publication via partition root applies column list');
+ 'publication via partition root applies column list');
# TEST: Multiple publications which publish schema of parent table and
@@ -982,7 +1077,8 @@ is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_root ORDER BY a, b
# also directly (with a columns list). The expected outcome is there is
# no column list.
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
DROP PUBLICATION pub1, pub2, pub3, pub4, pub5, pub6, pub7, pub8;
CREATE SCHEMA s1;
@@ -996,7 +1092,8 @@ $node_publisher->safe_psql('postgres', qq(
INSERT INTO s1.t VALUES (1, 2, 3);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE SCHEMA s1;
CREATE TABLE s1.t (a int, b int, c int) PARTITION BY RANGE (a);
CREATE TABLE t_1 PARTITION OF s1.t FOR VALUES FROM (1) TO (10);
@@ -1006,21 +1103,23 @@ $node_subscriber->safe_psql('postgres', qq(
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO s1.t VALUES (4, 5, 6);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM s1.t ORDER BY a"),
- qq(1|2|3
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM s1.t ORDER BY a"),
+ qq(1|2|3
4|5|6),
- 'two publications, publishing the same relation');
+ 'two publications, publishing the same relation');
# Now resync the subcription, but with publications in the opposite order.
# The result should be the same.
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
TRUNCATE s1.t;
ALTER SUBSCRIPTION sub1 SET PUBLICATION pub2, pub1;
@@ -1028,22 +1127,24 @@ $node_subscriber->safe_psql('postgres', qq(
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO s1.t VALUES (7, 8, 9);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM s1.t ORDER BY a"),
- qq(7|8|9),
- 'two publications, publishing the same relation');
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM s1.t ORDER BY a"),
+ qq(7|8|9),
+ 'two publications, publishing the same relation');
# TEST: One publication, containing both the parent and child relations.
# The expected outcome is list "a", because that's the column list defined
# for the top-most ancestor added to the publication.
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
DROP SCHEMA s1 CASCADE;
CREATE TABLE t (a int, b int, c int) PARTITION BY RANGE (a);
CREATE TABLE t_1 PARTITION OF t FOR VALUES FROM (1) TO (10)
@@ -1057,7 +1158,8 @@ $node_publisher->safe_psql('postgres', qq(
INSERT INTO t VALUES (1, 2, 3);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
DROP SCHEMA s1 CASCADE;
CREATE TABLE t (a int, b int, c int) PARTITION BY RANGE (a);
CREATE TABLE t_1 PARTITION OF t FOR VALUES FROM (1) TO (10)
@@ -1069,16 +1171,18 @@ $node_subscriber->safe_psql('postgres', qq(
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO t VALUES (4, 5, 6);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM t ORDER BY a, b, c"),
- qq(1||
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM t ORDER BY a, b, c"),
+ qq(1||
4||),
- 'publication containing both parent and child relation');
+ 'publication containing both parent and child relation');
# TEST: One publication, containing both the parent and child relations.
@@ -1087,7 +1191,8 @@ is($node_subscriber->safe_psql('postgres',"SELECT * FROM t ORDER BY a, b, c"),
# Note: The difference from the preceding test is that in this case both
# relations have a column list defined.
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
DROP TABLE t;
CREATE TABLE t (a int, b int, c int) PARTITION BY RANGE (a);
CREATE TABLE t_1 PARTITION OF t FOR VALUES FROM (1) TO (10)
@@ -1101,7 +1206,8 @@ $node_publisher->safe_psql('postgres', qq(
INSERT INTO t VALUES (1, 2, 3);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
DROP TABLE t;
CREATE TABLE t (a int, b int, c int) PARTITION BY RANGE (a);
CREATE TABLE t_1 PARTITION OF t FOR VALUES FROM (1) TO (10)
@@ -1113,16 +1219,18 @@ $node_subscriber->safe_psql('postgres', qq(
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO t VALUES (4, 5, 6);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM t ORDER BY a, b, c"),
- qq(1||
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM t ORDER BY a, b, c"),
+ qq(1||
4||),
- 'publication containing both parent and child relation');
+ 'publication containing both parent and child relation');
$node_subscriber->stop('fast');
diff --git a/src/tools/PerfectHash.pm b/src/tools/PerfectHash.pm
index 0803698d579..4ffb6bd5474 100644
--- a/src/tools/PerfectHash.pm
+++ b/src/tools/PerfectHash.pm
@@ -89,6 +89,7 @@ sub generate_hash_function
FIND_PARAMS:
for ($hash_seed1 = 0; $hash_seed1 < 10; $hash_seed1++)
{
+
for ($hash_seed2 = 0; $hash_seed2 < 10; $hash_seed2++)
{
foreach (17, 31, 127, 8191)
diff --git a/src/tools/ci/windows_build_config.pl b/src/tools/ci/windows_build_config.pl
index b0d4360c748..59268a0bb60 100644
--- a/src/tools/ci/windows_build_config.pl
+++ b/src/tools/ci/windows_build_config.pl
@@ -4,10 +4,10 @@ use warnings;
our $config;
$config->{"tap_tests"} = 1;
-$config->{"asserts"} = 1;
+$config->{"asserts"} = 1;
$config->{"openssl"} = "c:/openssl/1.1/";
-$config->{"perl"} = "c:/strawberry/$ENV{DEFAULT_PERL_VERSION}/perl/";
-$config->{"python"} = "c:/python/";
+$config->{"perl"} = "c:/strawberry/$ENV{DEFAULT_PERL_VERSION}/perl/";
+$config->{"python"} = "c:/python/";
1;
diff --git a/src/tools/mark_pgdllimport.pl b/src/tools/mark_pgdllimport.pl
index 834fcac5b54..7b51ae3c201 100755
--- a/src/tools/mark_pgdllimport.pl
+++ b/src/tools/mark_pgdllimport.pl
@@ -28,17 +28,18 @@ use warnings;
for my $include_file (@ARGV)
{
open(my $rfh, '<', $include_file) || die "$include_file: $!";
- my $buffer = '';
+ my $buffer = '';
my $num_pgdllimport_added = 0;
while (my $raw_line = <$rfh>)
{
- my $needs_pgdllimport = 1;
+ my $needs_pgdllimport = 1;
# By convention we declare global variables explicitly extern. We're
# looking for those not already marked with PGDLLIMPORT.
- $needs_pgdllimport = 0 if $raw_line !~ /^extern\s+/
- || $raw_line =~ /PGDLLIMPORT/;
+ $needs_pgdllimport = 0
+ if $raw_line !~ /^extern\s+/
+ || $raw_line =~ /PGDLLIMPORT/;
# Make a copy of the line and perform a simple-minded comment strip.
# Also strip trailing whitespace.
@@ -48,8 +49,9 @@ for my $include_file (@ARGV)
# Variable declarations should end in a semicolon. If we see an
# opening parenthesis, it's probably a function declaration.
- $needs_pgdllimport = 0 if $stripped_line !~ /;$/
- || $stripped_line =~ /\(/;
+ $needs_pgdllimport = 0
+ if $stripped_line !~ /;$/
+ || $stripped_line =~ /\(/;
# Add PGDLLIMPORT marker, if required.
if ($needs_pgdllimport)
@@ -68,7 +70,7 @@ for my $include_file (@ARGV)
if ($num_pgdllimport_added > 0)
{
printf "%s: adding %d PGDLLIMPORT markers\n",
- $include_file, $num_pgdllimport_added;
+ $include_file, $num_pgdllimport_added;
open(my $wfh, '>', $include_file) || die "$include_file: $!";
print $wfh $buffer;
close($wfh);
diff --git a/src/tools/msvc/MSBuildProject.pm b/src/tools/msvc/MSBuildProject.pm
index 5e312d232e9..f24d9e53482 100644
--- a/src/tools/msvc/MSBuildProject.pm
+++ b/src/tools/msvc/MSBuildProject.pm
@@ -313,7 +313,7 @@ sub WriteItemDefinitionGroup
my $targetmachine =
$self->{platform} eq 'Win32' ? 'MachineX86' : 'MachineX64';
- my $includes = join ';', @{$self->{includes}}, "";
+ my $includes = join ';', @{ $self->{includes} }, "";
print $f <<EOF;
<ItemDefinitionGroup Condition="'\$(Configuration)|\$(Platform)'=='$cfgname|$self->{platform}'">
diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm
index c3058399d49..e4feda10fd8 100644
--- a/src/tools/msvc/Mkvcbuild.pm
+++ b/src/tools/msvc/Mkvcbuild.pm
@@ -35,22 +35,22 @@ my $libpq;
my @unlink_on_exit;
# Set of variables for modules in contrib/ and src/test/modules/
-my $contrib_defines = {};
-my @contrib_uselibpq = ();
+my $contrib_defines = {};
+my @contrib_uselibpq = ();
my @contrib_uselibpgport = ();
my @contrib_uselibpgcommon = ();
-my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] };
+my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] };
my $contrib_extraincludes = {};
my $contrib_extrasource = {};
-my @contrib_excludes = (
- 'bool_plperl', 'commit_ts',
- 'hstore_plperl', 'hstore_plpython',
- 'intagg', 'jsonb_plperl',
- 'jsonb_plpython', 'ltree_plpython',
- 'sepgsql',
- 'brin', 'test_extensions',
- 'test_misc', 'test_pg_dump',
- 'snapshot_too_old', 'unsafe_tests');
+my @contrib_excludes = (
+ 'bool_plperl', 'commit_ts',
+ 'hstore_plperl', 'hstore_plpython',
+ 'intagg', 'jsonb_plperl',
+ 'jsonb_plpython', 'ltree_plpython',
+ 'sepgsql', 'brin',
+ 'test_extensions', 'test_misc',
+ 'test_pg_dump', 'snapshot_too_old',
+ 'unsafe_tests');
# Set of variables for frontend modules
my $frontend_defines = { 'initdb' => 'FRONTEND' };
@@ -286,16 +286,18 @@ sub mkvcbuild
my $libpq_testclient =
$solution->AddProject('libpq_testclient', 'exe', 'misc',
- 'src/interfaces/libpq/test');
- $libpq_testclient->AddFile('src/interfaces/libpq/test/libpq_testclient.c');
+ 'src/interfaces/libpq/test');
+ $libpq_testclient->AddFile(
+ 'src/interfaces/libpq/test/libpq_testclient.c');
$libpq_testclient->AddIncludeDir('src/interfaces/libpq');
$libpq_testclient->AddReference($libpgport, $libpq);
$libpq_testclient->AddLibrary('ws2_32.lib');
my $libpq_uri_regress =
$solution->AddProject('libpq_uri_regress', 'exe', 'misc',
- 'src/interfaces/libpq/test');
- $libpq_uri_regress->AddFile('src/interfaces/libpq/test/libpq_uri_regress.c');
+ 'src/interfaces/libpq/test');
+ $libpq_uri_regress->AddFile(
+ 'src/interfaces/libpq/test/libpq_uri_regress.c');
$libpq_uri_regress->AddIncludeDir('src/interfaces/libpq');
$libpq_uri_regress->AddReference($libpgport, $libpq);
$libpq_uri_regress->AddLibrary('ws2_32.lib');
@@ -464,7 +466,8 @@ sub mkvcbuild
if (!$solution->{options}->{openssl})
{
- push @contrib_excludes, 'sslinfo', 'ssl_passphrase_callback', 'pgcrypto';
+ push @contrib_excludes, 'sslinfo', 'ssl_passphrase_callback',
+ 'pgcrypto';
}
if (!$solution->{options}->{uuid})
@@ -508,7 +511,8 @@ sub mkvcbuild
my $pymajorver = substr($pyver, 0, 1);
- die "Python version $pyver is too old (version 3 or later is required)"
+ die
+ "Python version $pyver is too old (version 3 or later is required)"
if int($pymajorver) < 3;
my $plpython = $solution->AddProject('plpython' . $pymajorver,
@@ -926,7 +930,7 @@ sub AddTransformModule
# Add PL dependencies
$p->AddIncludeDir($pl_src);
$p->AddReference($pl_proj);
- $p->AddIncludeDir($_) for @{$pl_proj->{includes}};
+ $p->AddIncludeDir($_) for @{ $pl_proj->{includes} };
foreach my $pl_lib (@{ $pl_proj->{libraries} })
{
$p->AddLibrary($pl_lib);
@@ -936,7 +940,7 @@ sub AddTransformModule
if ($type_proj)
{
$p->AddIncludeDir($type_src);
- $p->AddIncludeDir($_) for @{$type_proj->{includes}};
+ $p->AddIncludeDir($_) for @{ $type_proj->{includes} };
foreach my $type_lib (@{ $type_proj->{libraries} })
{
$p->AddLibrary($type_lib);
@@ -950,9 +954,9 @@ sub AddTransformModule
# Add a simple contrib project
sub AddContrib
{
- my $subdir = shift;
- my $n = shift;
- my $mf = Project::read_file("$subdir/$n/Makefile");
+ my $subdir = shift;
+ my $n = shift;
+ my $mf = Project::read_file("$subdir/$n/Makefile");
my @projects = ();
if ($mf =~ /^MODULE_big\s*=\s*(.*)$/mg)
@@ -988,7 +992,8 @@ sub AddContrib
}
# Process custom compiler flags
- if ($mf =~ /^PG_CPPFLAGS\s*=\s*(.*)$/mg || $mf =~ /^override\s*CPPFLAGS\s*[+:]?=\s*(.*)$/mg)
+ if ( $mf =~ /^PG_CPPFLAGS\s*=\s*(.*)$/mg
+ || $mf =~ /^override\s*CPPFLAGS\s*[+:]?=\s*(.*)$/mg)
{
foreach my $flag (split /\s+/, $1)
{
diff --git a/src/tools/msvc/Project.pm b/src/tools/msvc/Project.pm
index d39c502f30a..570bab563a7 100644
--- a/src/tools/msvc/Project.pm
+++ b/src/tools/msvc/Project.pm
@@ -76,11 +76,11 @@ sub AddFiles
# name but a different file extension and add those files too.
sub FindAndAddAdditionalFiles
{
- my $self = shift;
+ my $self = shift;
my $fname = shift;
$fname =~ /(.*)(\.[^.]+)$/;
my $filenoext = $1;
- my $fileext = $2;
+ my $fileext = $2;
# For .c files, check if either a .l or .y file of the same name
# exists and add that too.
@@ -161,7 +161,7 @@ sub AddReference
while (my $ref = shift)
{
- if (! grep { $_ eq $ref} @{ $self->{references} })
+ if (!grep { $_ eq $ref } @{ $self->{references} })
{
push @{ $self->{references} }, $ref;
}
@@ -181,7 +181,7 @@ sub AddLibrary
$lib = '&quot;' . $lib . "&quot;";
}
- if (! grep { $_ eq $lib} @{ $self->{libraries} })
+ if (!grep { $_ eq $lib } @{ $self->{libraries} })
{
push @{ $self->{libraries} }, $lib;
}
@@ -199,7 +199,7 @@ sub AddIncludeDir
foreach my $inc (split(/;/, $incstr))
{
- if (! grep { $_ eq $inc} @{ $self->{includes} })
+ if (!grep { $_ eq $inc } @{ $self->{includes} })
{
push @{ $self->{includes} }, $inc;
}
diff --git a/src/tools/msvc/Solution.pm b/src/tools/msvc/Solution.pm
index 03357095b20..d30e8fcb117 100644
--- a/src/tools/msvc/Solution.pm
+++ b/src/tools/msvc/Solution.pm
@@ -349,7 +349,7 @@ sub GenerateFiles
HAVE_READLINE_READLINE_H => undef,
HAVE_READLINK => undef,
HAVE_READV => undef,
- HAVE_RL_COMPLETION_MATCHES => undef,
+ HAVE_RL_COMPLETION_MATCHES => undef,
HAVE_RL_COMPLETION_SUPPRESS_QUOTE => undef,
HAVE_RL_FILENAME_COMPLETION_FUNCTION => undef,
HAVE_RL_FILENAME_QUOTE_CHARACTERS => undef,
diff --git a/src/tools/msvc/vcregress.pl b/src/tools/msvc/vcregress.pl
index 65b7be795c3..c3729f6be5e 100644
--- a/src/tools/msvc/vcregress.pl
+++ b/src/tools/msvc/vcregress.pl
@@ -70,10 +70,10 @@ copy("$Config/regress/regress.dll", "src/test/regress");
copy("$Config/dummy_seclabel/dummy_seclabel.dll", "src/test/regress");
# Configuration settings used by TAP tests
-$ENV{with_ssl} = $config->{openssl} ? 'openssl' : 'no';
-$ENV{with_ldap} = $config->{ldap} ? 'yes' : 'no';
-$ENV{with_icu} = $config->{icu} ? 'yes' : 'no';
-$ENV{with_gssapi} = $config->{gss} ? 'yes' : 'no';
+$ENV{with_ssl} = $config->{openssl} ? 'openssl' : 'no';
+$ENV{with_ldap} = $config->{ldap} ? 'yes' : 'no';
+$ENV{with_icu} = $config->{icu} ? 'yes' : 'no';
+$ENV{with_gssapi} = $config->{gss} ? 'yes' : 'no';
$ENV{with_krb_srvnam} = $config->{krb_srvnam} || 'postgres';
$ENV{with_readline} = 'no';
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index 87ee7bf8662..dd1214977a8 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -4,6 +4,7 @@ ACL_SIZE_INFORMATION
AFFIX
ASN1_INTEGER
ASN1_OBJECT
+ASN1_OCTET_STRING
ASN1_STRING
AV
A_ArrayExpr
@@ -63,6 +64,7 @@ AllocSetFreeList
AllocateDesc
AllocateDescKind
AlterCollationStmt
+AlterDatabaseRefreshCollStmt
AlterDatabaseSetStmt
AlterDatabaseStmt
AlterDefaultPrivilegesStmt
@@ -80,6 +82,7 @@ AlterOpFamilyStmt
AlterOperatorStmt
AlterOwnerStmt
AlterPolicyStmt
+AlterPublicationAction
AlterPublicationStmt
AlterRoleSetStmt
AlterRoleStmt
@@ -117,11 +120,17 @@ ApplyErrorCallbackArg
ApplyExecutionData
ApplySubXactData
Archive
+ArchiveCheckConfiguredCB
ArchiveEntryPtrType
+ArchiveFileCB
ArchiveFormat
ArchiveHandle
ArchiveMode
+ArchiveModuleCallbacks
+ArchiveModuleInit
ArchiveOpts
+ArchiveShutdownCB
+ArchiveStreamState
ArchiverOutput
ArchiverStage
ArrayAnalyzeExtraData
@@ -226,6 +235,8 @@ BackgroundWorkerHandle
BackgroundWorkerSlot
Barrier
BaseBackupCmd
+BaseBackupTargetHandle
+BaseBackupTargetType
BeginDirectModify_function
BeginForeignInsert_function
BeginForeignModify_function
@@ -237,6 +248,7 @@ BgwHandleStatus
BinaryArithmFunc
BindParamCbData
BipartiteMatchState
+BitString
BitmapAnd
BitmapAndPath
BitmapAndState
@@ -271,11 +283,11 @@ BloomScanOpaqueData
BloomSignatureWord
BloomState
BloomTuple
-BlowfishContext
BoolAggState
BoolExpr
BoolExprType
BoolTestType
+Boolean
BooleanTest
BpChar
BrinBuildState
@@ -336,6 +348,7 @@ CachedPlanSource
CallContext
CallStmt
CancelRequestPacket
+Cardinality
CaseExpr
CaseTestExpr
CaseWhen
@@ -346,6 +359,7 @@ CatCTup
CatCache
CatCacheHeader
CatalogId
+CatalogIdMapEntry
CatalogIndexState
ChangeVarNodes_context
CheckPoint
@@ -413,6 +427,7 @@ CompositeIOData
CompositeTypeStmt
CompoundAffixFlag
CompressionAlgorithm
+CompressionLocation
CompressorState
ComputeXidHorizonsResult
ConditionVariable
@@ -426,7 +441,6 @@ ConnParams
ConnStatusType
ConnType
ConnectionStateEnum
-ConnsAllowedState
ConsiderSplitContext
Const
ConstrCheck
@@ -446,6 +460,7 @@ CopyDest
CopyFormatOptions
CopyFromState
CopyFromStateData
+CopyHeaderChoice
CopyInsertMethod
CopyMultiInsertBuffer
CopyMultiInsertInfo
@@ -582,8 +597,10 @@ DumpComponents
DumpId
DumpOptions
DumpSignalInformation
+DumpableAcl
DumpableObject
DumpableObjectType
+DumpableObjectWithAcl
DynamicFileList
DynamicZoneAbbrev
EC_KEY
@@ -594,6 +611,7 @@ EOM_get_flat_size_method
EPQState
EPlan
EState
+EStatus
EVP_CIPHER
EVP_CIPHER_CTX
EVP_MD
@@ -635,9 +653,9 @@ EventTriggerInfo
EventTriggerQueryState
ExceptionLabelMap
ExceptionMap
-ExclusiveBackupState
ExecAuxRowMark
ExecEvalBoolSubroutine
+ExecEvalJsonExprContext
ExecEvalSubroutine
ExecForeignBatchInsert_function
ExecForeignDelete_function
@@ -690,14 +708,12 @@ ExtensibleNodeEntry
ExtensibleNodeMethods
ExtensionControlFile
ExtensionInfo
-ExtensionMemberId
ExtensionVersionInfo
FDWCollateState
FD_SET
FILE
FILETIME
-FILE_INFORMATION_CLASS
-FILE_STANDARD_INFORMATION
+FPI
FSMAddress
FSMPage
FSMPageData
@@ -724,6 +740,7 @@ FixedParallelExecutorState
FixedParallelState
FixedParamState
FlagMode
+Float
FlushPosition
FmgrBuiltin
FmgrHookEventType
@@ -793,6 +810,7 @@ FormData_pg_sequence_data
FormData_pg_shdepend
FormData_pg_statistic
FormData_pg_statistic_ext
+FormData_pg_statistic_ext_data
FormData_pg_subscription
FormData_pg_subscription_rel
FormData_pg_tablespace
@@ -850,6 +868,7 @@ Form_pg_sequence_data
Form_pg_shdepend
Form_pg_statistic
Form_pg_statistic_ext
+Form_pg_statistic_ext_data
Form_pg_subscription
Form_pg_subscription_rel
Form_pg_tablespace
@@ -916,6 +935,7 @@ GISTSearchItem
GISTTYPE
GIST_SPLITVEC
GMReaderTupleBuffer
+GROUP
GV
Gather
GatherMerge
@@ -980,6 +1000,7 @@ GistSplitVector
GistTsVectorOptions
GistVacState
GlobalTransaction
+GlobalVisHorizonKind
GlobalVisState
GrantRoleStmt
GrantStmt
@@ -1020,7 +1041,6 @@ HASHELEMENT
HASHHDR
HASHSEGMENT
HASH_SEQ_STATUS
-HCRYPTPROV
HE
HEntry
HIST_ENTRY
@@ -1063,7 +1083,6 @@ HashScanPosData
HashScanPosItem
HashSkewBucket
HashState
-HashTapeInfo
HashValueFunc
HbaLine
HeadlineJsonState
@@ -1087,7 +1106,6 @@ INFIX
INT128
INTERFACE_INFO
IOFuncSelector
-IO_STATUS_BLOCK
IPCompareMethod
ITEM
IV
@@ -1158,13 +1176,14 @@ Instrumentation
Int128AggState
Int8TransTypeData
IntRBTreeNode
+Integer
IntegerSet
InternalDefaultACL
InternalGrant
Interval
IntoClause
-InvalidationChunk
-InvalidationListHeader
+InvalMessageArray
+InvalidationMsgsGroup
IpcMemoryId
IpcMemoryKey
IpcMemoryState
@@ -1206,10 +1225,31 @@ JoinState
JoinType
JsObject
JsValue
+JsonAggConstructor
JsonAggState
+JsonArgument
+JsonArrayAgg
+JsonArrayConstructor
+JsonArrayQueryConstructor
JsonBaseObjectInfo
+JsonBehavior
+JsonBehaviorType
+JsonCoercion
+JsonCommon
+JsonConstructorExpr
+JsonConstructorType
+JsonEncoding
+JsonExpr
+JsonExprOp
+JsonFormat
+JsonFormatType
+JsonFunc
+JsonFuncExpr
JsonHashEntry
+JsonIsPredicate
+JsonItemCoercions
JsonIterateStringValuesAction
+JsonKeyValue
JsonLexContext
JsonLikeRegexContext
JsonManifestFileField
@@ -1217,10 +1257,15 @@ JsonManifestParseContext
JsonManifestParseState
JsonManifestSemanticState
JsonManifestWALRangeField
+JsonObjectAgg
+JsonObjectConstructor
+JsonOutput
JsonParseContext
JsonParseErrorType
+JsonParseExpr
JsonPath
JsonPathBool
+JsonPathDatatypeStatus
JsonPathExecContext
JsonPathExecResult
JsonPathGinAddPathItemFunc
@@ -1233,11 +1278,18 @@ JsonPathGinPathItem
JsonPathItem
JsonPathItemType
JsonPathKeyword
+JsonPathMutableContext
JsonPathParseItem
JsonPathParseResult
JsonPathPredicateCallback
JsonPathString
+JsonPathVarCallback
+JsonPathVariableEvalContext
+JsonQuotes
+JsonReturning
+JsonScalarExpr
JsonSemAction
+JsonSerializeExpr
JsonTable
JsonTableColumn
JsonTableColumnType
@@ -1252,8 +1304,16 @@ JsonTableSibling
JsonTokenType
JsonTransformStringValuesAction
JsonTypeCategory
+JsonUniqueBuilderState
+JsonUniqueCheckState
+JsonUniqueHashEntry
+JsonUniqueParsingState
+JsonUniqueStackEntry
+JsonValueExpr
JsonValueList
JsonValueListIterator
+JsonValueType
+JsonWrapper
Jsonb
JsonbAggState
JsonbContainer
@@ -1268,6 +1328,8 @@ JsonbTypeCategory
JsonbValue
JumbleState
JunkFilter
+KeyAction
+KeyActions
KeyArray
KeySuffix
KeyWord
@@ -1311,6 +1373,7 @@ LPBYTE
LPCTSTR
LPCWSTR
LPDWORD
+LPFILETIME
LPSECURITY_ATTRIBUTES
LPSERVICE_STATUS
LPSTR
@@ -1327,6 +1390,11 @@ LWLock
LWLockHandle
LWLockMode
LWLockPadded
+LZ4F_compressionContext_t
+LZ4F_decompressOptions_t
+LZ4F_decompressionContext_t
+LZ4F_errorCode_t
+LZ4F_preferences_t
LabelProvider
LagTracker
LargeObjectDesc
@@ -1431,6 +1499,7 @@ MBuf
MCVItem
MCVList
MEMORY_BASIC_INFORMATION
+MGVTBL
MINIDUMPWRITEDUMP
MINIDUMP_TYPE
MJEvalResult
@@ -1484,6 +1553,7 @@ ModifyTable
ModifyTableContext
ModifyTablePath
ModifyTableState
+MonotonicFunction
MorphOpaque
MsgType
MultiAssignRef
@@ -1530,6 +1600,7 @@ NotificationHash
NotificationList
NotifyStmt
Nsrt
+NtDllRoutine
NullIfExpr
NullTest
NullTestType
@@ -1604,8 +1675,9 @@ PACL
PATH
PBOOL
PCtxtHandle
+PERL_CONTEXT
+PERL_SI
PFN
-PFN_NTQUERYINFORMATIONFILE
PGAlignedBlock
PGAlignedXLogBlock
PGAsyncStatusType
@@ -1663,7 +1735,6 @@ PGresParamDesc
PGresult
PGresult_data
PHANDLE
-PIO_STATUS_BLOCK
PLAINTREE
PLAssignStmt
PLUID_AND_ATTRIBUTES
@@ -1793,9 +1864,10 @@ PTEntryArray
PTIterationArray
PTOKEN_PRIVILEGES
PTOKEN_USER
+PULONG
PUTENVPROC
PVIndStats
-PvIndVacStatus
+PVIndVacStatus
PVOID
PVShared
PX_Alias
@@ -1897,8 +1969,11 @@ PathClauseUsage
PathCostComparison
PathHashStack
PathKey
+PathKeyInfo
PathKeysComparison
PathTarget
+PathkeyMutatorState
+PathkeySortCost
PatternInfo
PatternInfoArray
Pattern_Prefix_Status
@@ -1908,8 +1983,8 @@ PendingRelDelete
PendingRelSync
PendingUnlinkEntry
PendingWriteback
+PerLockTagEntry
PerlInterpreter
-Perl_check_t
Perl_ppaddr_t
Permutation
PermutationStep
@@ -1964,7 +2039,6 @@ PgStat_Kind
PgStat_KindInfo
PgStat_LocalState
PgStat_PendingDroppedStatsItem
-PgStat_ReplSlotStats
PgStat_SLRUStats
PgStat_ShmemControl
PgStat_Snapshot
@@ -2054,6 +2128,7 @@ ProjectSetPath
ProjectSetState
ProjectionInfo
ProjectionPath
+PromptInterruptContext
ProtocolVersion
PrsStorage
PruneState
@@ -2080,7 +2155,6 @@ PushFilter
PushFilterOps
PushFunction
PyCFunction
-PyCodeObject
PyMappingMethods
PyMethodDef
PyModuleDef
@@ -2125,7 +2199,6 @@ RI_QueryKey
RTEKind
RWConflict
RWConflictPoolHeader
-RandomState
Range
RangeBound
RangeBox
@@ -2153,6 +2226,7 @@ ReadBufferMode
ReadBytePtrType
ReadExtraTocPtrType
ReadFunc
+ReadLocalXLogPageNoWaitPrivate
ReadReplicationSlotCmd
ReassignOwnedStmt
RecheckForeignScan_function
@@ -2199,6 +2273,7 @@ RelationInfo
RelationPtr
RelationSyncEntry
RelcacheCallbackFunction
+ReleaseMatchCB
RelfilenodeMapEntry
RelfilenodeMapKey
Relids
@@ -2275,7 +2350,7 @@ RewriteState
RmgrData
RmgrDescData
RmgrId
-RmgrIds
+RoleNameItem
RoleSpec
RoleSpecType
RoleStmtType
@@ -2288,6 +2363,7 @@ RowMarkClause
RowMarkType
RowSecurityDesc
RowSecurityPolicy
+RtlGetLastNtStatus_t
RuleInfo
RuleLock
RuleStmt
@@ -2325,7 +2401,6 @@ SPLITCOST
SPNode
SPNodeData
SPPageDesc
-SQLCmd
SQLDropObject
SQLFunctionCache
SQLFunctionCachePtr
@@ -2343,7 +2418,7 @@ SYNCHRONIZATION_BARRIER
SampleScan
SampleScanGetSampleSize_function
SampleScanState
-SamplerRandomState
+SavedTransactionCharacteristics
ScalarArrayOpExpr
ScalarArrayOpExprHashEntry
ScalarArrayOpExprHashTable
@@ -2525,7 +2600,6 @@ StatEntry
StatExtEntry
StateFileChunk
StatisticExtInfo
-Stats
StatsBuildData
StatsData
StatsElem
@@ -2537,7 +2611,7 @@ Step
StopList
StrategyNumber
StreamCtl
-StreamXidHash
+String
StringInfo
StringInfoData
StripnullState
@@ -2555,6 +2629,7 @@ SubXactInfo
SubqueryScan
SubqueryScanPath
SubqueryScanState
+SubqueryScanStatus
SubscriptExecSetup
SubscriptExecSteps
SubscriptRoutines
@@ -2569,6 +2644,7 @@ SupportRequestIndexCondition
SupportRequestRows
SupportRequestSelectivity
SupportRequestSimplify
+SupportRequestWFuncMonotonic
Syn
SyncOps
SyncRepConfigData
@@ -2631,6 +2707,7 @@ TSVectorData
TSVectorParseState
TSVectorStat
TState
+TStatus
TStoreState
TXNEntryFile
TYPCATEGORY
@@ -2801,7 +2878,6 @@ UniquePath
UniquePathMethod
UniqueState
UnlistenStmt
-UnpackTarState
UnresolvedTup
UnresolvedTupData
UpdateContext
@@ -2820,7 +2896,6 @@ VacuumParams
VacuumRelation
VacuumStmt
ValidateIndexState
-Value
ValuesScan
ValuesScanState
Var
@@ -2839,6 +2914,7 @@ VariableShowStmt
VariableSpace
VariableStatData
VariableSubstituteHook
+Variables
VersionedQuery
Vfd
ViewCheckOption
@@ -2878,7 +2954,6 @@ WaitEventTimeout
WaitPMResult
WalCloseMethod
WalCompression
-WalCompressionMethod
WalLevel
WalRcvData
WalRcvExecResult
@@ -2898,6 +2973,7 @@ Walfile
WindowAgg
WindowAggPath
WindowAggState
+WindowAggStatus
WindowClause
WindowClauseSortData
WindowDef
@@ -2944,7 +3020,6 @@ XLogCtlData
XLogCtlInsert
XLogDumpConfig
XLogDumpPrivate
-XLogDumpStats
XLogLongPageHeader
XLogLongPageHeaderData
XLogPageHeader
@@ -2952,13 +3027,14 @@ XLogPageHeaderData
XLogPageReadCB
XLogPageReadPrivate
XLogPageReadResult
+XLogPrefetchStats
XLogPrefetcher
XLogPrefetcherFilter
-XLogPrefetchStats
XLogReaderRoutine
XLogReaderState
XLogRecData
XLogRecPtr
+XLogRecStats
XLogRecord
XLogRecordBlockCompressHeader
XLogRecordBlockHeader
@@ -2968,8 +3044,10 @@ XLogRecoveryCtlData
XLogRedoAction
XLogSegNo
XLogSource
+XLogStats
XLogwrtResult
XLogwrtRqst
+XPV
XPVIV
XPVMG
XactCallback
@@ -2988,6 +3066,10 @@ XmlTableBuilderData
YYLTYPE
YYSTYPE
YY_BUFFER_STATE
+ZSTD_CCtx
+ZSTD_DCtx
+ZSTD_inBuffer
+ZSTD_outBuffer
_SPI_connection
_SPI_plan
__AssignProcessToJobObject
@@ -2999,6 +3081,7 @@ __SetInformationJobObject
__time64_t
_dev_t
_ino_t
+_locale_t
_resultmap
_stringlist
acquireLocksOnSubLinks_context
@@ -3041,6 +3124,29 @@ backup_manifest_info
backup_manifest_option
base_yy_extra_type
basebackup_options
+bbsink
+bbsink_copystream
+bbsink_gzip
+bbsink_lz4
+bbsink_ops
+bbsink_server
+bbsink_shell
+bbsink_state
+bbsink_throttle
+bbsink_zstd
+bbstreamer
+bbstreamer_archive_context
+bbstreamer_extractor
+bbstreamer_gzip_decompressor
+bbstreamer_gzip_writer
+bbstreamer_lz4_frame
+bbstreamer_member
+bbstreamer_ops
+bbstreamer_plain_writer
+bbstreamer_recovery_injector
+bbstreamer_tar_archiver
+bbstreamer_tar_parser
+bbstreamer_zstd_frame
bgworker_main_type
binaryheap
binaryheap_comparator
@@ -3049,11 +3155,14 @@ bits16
bits32
bits8
bloom_filter
+boolKEY
brin_column_state
brin_serialize_callback_type
bytea
cached_re_str
+canonicalize_state
cashKEY
+catalogid_hash
cfp
check_agg_arguments_context
check_function_callback
@@ -3064,9 +3173,7 @@ check_ungrouped_columns_context
chr
clock_t
cmpEntriesArg
-cmpfunc
codes_t
-coercion
collation_cache_entry
color
colormaprange
@@ -3159,6 +3266,7 @@ find_expr_references_context
fix_join_expr_context
fix_scan_expr_context
fix_upper_expr_context
+fix_windowagg_cond_context
flatten_join_alias_vars_context
float4
float4KEY
@@ -3239,7 +3347,6 @@ init_function
inline_cte_walker_context
inline_error_callback_arg
ino_t
-inquiry
instr_time
int128
int16
@@ -3319,16 +3426,8 @@ mix_data_t
mixedStruct
mode_t
movedb_failure_params
-mp_digit
-mp_int
-mp_result
-mp_sign
-mp_size
-mp_small
-mp_usmall
-mp_word
-mpz_t
multirange_bsearch_comparison
+multirange_unnest_fctx
mxact
mxtruncinfo
needs_fmgr_hook_type
@@ -3338,6 +3437,7 @@ normal_rand_fctx
ntile_context
numeric
object_access_hook_type
+object_access_hook_type_str
off_t
oidKEY
oidvector
@@ -3391,6 +3491,7 @@ pg_locale_t
pg_mb_radix_tree
pg_md5_ctx
pg_on_exit_callback
+pg_prng_state
pg_re_flags
pg_saslprep_rc
pg_sha1_ctx
@@ -3426,7 +3527,9 @@ pgssSharedState
pgssStoreKind
pgssVersion
pgstat_entry_ref_hash_hash
+pgstat_entry_ref_hash_iterator
pgstat_page
+pgstat_snapshot_hash
pgstattuple_type
pgthreadlock_t
pid_t
@@ -3529,7 +3632,6 @@ ret_type
rewind_source
rewrite_event
rf_context
-rijndael_ctx
rm_detail_t
role_auth_extra
row_security_policy_hook_type
@@ -3561,6 +3663,7 @@ slist_mutable_iter
slist_node
slock_t
socket_set
+socklen_t
spgBulkDeleteState
spgChooseIn
spgChooseOut
@@ -3603,7 +3706,6 @@ string
substitute_actual_parameters_context
substitute_actual_srf_parameters_context
substitute_phv_relids_context
-svtype
symbol
tablespaceinfo
teSection
@@ -3635,8 +3737,6 @@ tuplehash_hash
tuplehash_iterator
type
tzEntry
-u1byte
-u4byte
u_char
u_int
uchr