29 files changed, 550 insertions, 146 deletions
diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index 98d4e3d7dac..8a0b112a7ff 100644
--- a/contrib/dblink/dblink.c
+++ b/contrib/dblink/dblink.c
@@ -105,7 +105,7 @@ static PGresult *storeQueryResult(volatile storeInfo *sinfo, PGconn *conn, const
 static void storeRow(volatile storeInfo *sinfo, PGresult *res, bool first);
 static remoteConn *getConnectionByName(const char *name);
 static HTAB *createConnHash(void);
-static void createNewConnection(const char *name, remoteConn *rconn);
+static remoteConn *createNewConnection(const char *name);
 static void deleteConnection(const char *name);
 static char **get_pkey_attnames(Relation rel, int16 *indnkeyatts);
 static char **get_text_array_contents(ArrayType *array, int *numitems);
@@ -119,7 +119,8 @@ static Relation get_rel_from_relname(text *relname_text, LOCKMODE lockmode, AclM
 static char *generate_relation_name(Relation rel);
 static void dblink_connstr_check(const char *connstr);
 static bool dblink_connstr_has_pw(const char *connstr);
-static void dblink_security_check(PGconn *conn, remoteConn *rconn, const char *connstr);
+static void dblink_security_check(PGconn *conn, const char *connname,
+                                  const char *connstr);
 static void dblink_res_error(PGconn *conn, const char *conname,
                              PGresult *res, bool fail, const char *fmt,...) pg_attribute_printf(5, 6);
 static char *get_connect_string(const char *servername);
@@ -147,16 +148,22 @@ static uint32 dblink_we_get_conn = 0;
 static uint32 dblink_we_get_result = 0;
 
 /*
- * Following is list that holds multiple remote connections.
+ * Following is hash that holds multiple remote connections.
  * Calling convention of each dblink function changes to accept
- * connection name as the first parameter. The connection list is
+ * connection name as the first parameter. The connection hash is
  * much like ecpg e.g. a mapping between a name and a PGconn object.
+ *
+ * To avoid potentially leaking a PGconn object in case of out-of-memory
+ * errors, we first create the hash entry, then open the PGconn.
+ * Hence, a hash entry whose rconn.conn pointer is NULL must be
+ * understood as a leftover from a failed create; it should be ignored
+ * by lookup operations, and silently replaced by create operations.
  */
 
 typedef struct remoteConnHashEnt
 {
     char        name[NAMEDATALEN];
-    remoteConn *rconn;
+    remoteConn  rconn;
 } remoteConnHashEnt;
 
 /* initial number of connection hashes */
@@ -233,7 +240,7 @@ dblink_get_conn(char *conname_or_str,
                      errmsg("could not establish connection"),
                      errdetail_internal("%s", msg)));
         }
-        dblink_security_check(conn, rconn, connstr);
+        dblink_security_check(conn, NULL, connstr);
         if (PQclientEncoding(conn) != GetDatabaseEncoding())
             PQsetClientEncoding(conn, GetDatabaseEncodingName());
         freeconn = true;
@@ -296,15 +303,6 @@ dblink_connect(PG_FUNCTION_ARGS)
     else if (PG_NARGS() == 1)
         conname_or_str = text_to_cstring(PG_GETARG_TEXT_PP(0));
 
-    if (connname)
-    {
-        rconn = (remoteConn *) MemoryContextAlloc(TopMemoryContext,
-                                                  sizeof(remoteConn));
-        rconn->conn = NULL;
-        rconn->openCursorCount = 0;
-        rconn->newXactForCursor = false;
-    }
-
     /* first check for valid foreign data server */
     connstr = get_connect_string(conname_or_str);
     if (connstr == NULL)
@@ -317,6 +315,13 @@ dblink_connect(PG_FUNCTION_ARGS)
     if (dblink_we_connect == 0)
         dblink_we_connect = WaitEventExtensionNew("DblinkConnect");
 
+    /* if we need a hashtable entry, make that first, since it might fail */
+    if (connname)
+    {
+        rconn = createNewConnection(connname);
+        Assert(rconn->conn == NULL);
+    }
+
     /* OK to make connection */
     conn = libpqsrv_connect(connstr, dblink_we_connect);
 
@@ -324,8 +329,8 @@
     {
         msg = pchomp(PQerrorMessage(conn));
         libpqsrv_disconnect(conn);
-        if (rconn)
-            pfree(rconn);
+        if (connname)
+            deleteConnection(connname);
 
         ereport(ERROR,
                 (errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
@@ -334,16 +339,16 @@
     }
 
     /* check password actually used if not superuser */
-    dblink_security_check(conn, rconn, connstr);
+    dblink_security_check(conn, connname, connstr);
 
     /* attempt to set client encoding to match server encoding, if needed */
     if (PQclientEncoding(conn) != GetDatabaseEncoding())
         PQsetClientEncoding(conn, GetDatabaseEncodingName());
 
+    /* all OK, save away the conn */
     if (connname)
     {
         rconn->conn = conn;
-        createNewConnection(connname, rconn);
     }
     else
     {
@@ -383,10 +388,7 @@ dblink_disconnect(PG_FUNCTION_ARGS)
     libpqsrv_disconnect(conn);
 
     if (rconn)
-    {
         deleteConnection(conname);
-        pfree(rconn);
-    }
     else
         pconn->conn = NULL;
 
@@ -1304,6 +1306,9 @@ dblink_get_connections(PG_FUNCTION_ARGS)
     hash_seq_init(&status, remoteConnHash);
     while ((hentry = (remoteConnHashEnt *) hash_seq_search(&status)) != NULL)
     {
+        /* ignore it if it's not an open connection */
+        if (hentry->rconn.conn == NULL)
+            continue;
         /* stash away current value */
         astate = accumArrayResult(astate,
                                   CStringGetTextDatum(hentry->name),
@@ -2539,8 +2544,8 @@ getConnectionByName(const char *name)
     hentry = (remoteConnHashEnt *) hash_search(remoteConnHash,
                                                key, HASH_FIND, NULL);
 
-    if (hentry)
-        return hentry->rconn;
+    if (hentry && hentry->rconn.conn != NULL)
+        return &hentry->rconn;
 
     return NULL;
 }
@@ -2557,8 +2562,8 @@ createConnHash(void)
                        HASH_ELEM | HASH_STRINGS);
 }
 
-static void
-createNewConnection(const char *name, remoteConn *rconn)
+static remoteConn *
+createNewConnection(const char *name)
 {
     remoteConnHashEnt *hentry;
     bool        found;
@@ -2572,17 +2577,15 @@ createNewConnection(const char *name, remoteConn *rconn)
     hentry = (remoteConnHashEnt *) hash_search(remoteConnHash, key,
                                                HASH_ENTER, &found);
 
-    if (found)
-    {
-        libpqsrv_disconnect(rconn->conn);
-        pfree(rconn);
-
+    if (found && hentry->rconn.conn != NULL)
         ereport(ERROR,
                 (errcode(ERRCODE_DUPLICATE_OBJECT),
                  errmsg("duplicate connection name")));
-    }
 
-    hentry->rconn = rconn;
+    /* New, or reusable, so initialize the rconn struct to zeroes */
+    memset(&hentry->rconn, 0, sizeof(remoteConn));
+
+    return &hentry->rconn;
 }
 
 static void
@@ -2671,9 +2674,12 @@ dblink_connstr_has_required_scram_options(const char *connstr)
  * We need to make sure that the connection made used credentials
  * which were provided by the user, so check what credentials were
  * used to connect and then make sure that they came from the user.
+ *
+ * On failure, we close "conn" and also delete the hashtable entry
+ * identified by "connname" (if that's not NULL).
  */
 static void
-dblink_security_check(PGconn *conn, remoteConn *rconn, const char *connstr)
+dblink_security_check(PGconn *conn, const char *connname, const char *connstr)
 {
     /* Superuser bypasses security check */
     if (superuser())
@@ -2703,8 +2709,8 @@ dblink_security_check(PGconn *conn, remoteConn *rconn, const char *connstr)
 
     /* Otherwise, fail out */
     libpqsrv_disconnect(conn);
-    if (rconn)
-        pfree(rconn);
+    if (connname)
+        deleteConnection(connname);
 
     ereport(ERROR,
             (errcode(ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED),
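For reference, a minimal SQL sketch of the named-connection lifecycle this change hardens; the connection name "myconn" and the connection strings are illustrative only, not from the patch:

    -- a failed named connect now leaves at most a dormant (conn == NULL) hash entry
    SELECT dblink_connect('myconn', 'host=localhost dbname=no_such_db');  -- fails
    -- leftover entries are invisible to lookups and silently reused by creates,
    -- so the same name can be tried again immediately
    SELECT dblink_connect('myconn', 'dbname=postgres');
    SELECT dblink_get_connections();   -- reports only open connections
    SELECT dblink_disconnect('myconn');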
name"))); - } - hentry->rconn = rconn; + /* New, or reusable, so initialize the rconn struct to zeroes */ + memset(&hentry->rconn, 0, sizeof(remoteConn)); + + return &hentry->rconn; } static void @@ -2671,9 +2674,12 @@ dblink_connstr_has_required_scram_options(const char *connstr) * We need to make sure that the connection made used credentials * which were provided by the user, so check what credentials were * used to connect and then make sure that they came from the user. + * + * On failure, we close "conn" and also delete the hashtable entry + * identified by "connname" (if that's not NULL). */ static void -dblink_security_check(PGconn *conn, remoteConn *rconn, const char *connstr) +dblink_security_check(PGconn *conn, const char *connname, const char *connstr) { /* Superuser bypasses security check */ if (superuser()) @@ -2703,8 +2709,8 @@ dblink_security_check(PGconn *conn, remoteConn *rconn, const char *connstr) /* Otherwise, fail out */ libpqsrv_disconnect(conn); - if (rconn) - pfree(rconn); + if (connname) + deleteConnection(connname); ereport(ERROR, (errcode(ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), diff --git a/contrib/pg_prewarm/Makefile b/contrib/pg_prewarm/Makefile index 9cfde8c4e4f..617ac8e09b2 100644 --- a/contrib/pg_prewarm/Makefile +++ b/contrib/pg_prewarm/Makefile @@ -10,6 +10,8 @@ EXTENSION = pg_prewarm DATA = pg_prewarm--1.1--1.2.sql pg_prewarm--1.1.sql pg_prewarm--1.0--1.1.sql PGFILEDESC = "pg_prewarm - preload relation data into system buffer cache" +REGRESS = pg_prewarm + TAP_TESTS = 1 ifdef USE_PGXS diff --git a/contrib/pg_prewarm/expected/pg_prewarm.out b/contrib/pg_prewarm/expected/pg_prewarm.out new file mode 100644 index 00000000000..94e4fa1a9d2 --- /dev/null +++ b/contrib/pg_prewarm/expected/pg_prewarm.out @@ -0,0 +1,10 @@ +-- Test pg_prewarm extension +CREATE EXTENSION pg_prewarm; +-- pg_prewarm() should fail if the target relation has no storage. +CREATE TABLE test (c1 int) PARTITION BY RANGE (c1); +SELECT pg_prewarm('test', 'buffer'); +ERROR: relation "test" does not have storage +DETAIL: This operation is not supported for partitioned tables. +-- Cleanup +DROP TABLE test; +DROP EXTENSION pg_prewarm; diff --git a/contrib/pg_prewarm/meson.build b/contrib/pg_prewarm/meson.build index 82b9851303c..f24c47ef6a5 100644 --- a/contrib/pg_prewarm/meson.build +++ b/contrib/pg_prewarm/meson.build @@ -29,6 +29,11 @@ tests += { 'name': 'pg_prewarm', 'sd': meson.current_source_dir(), 'bd': meson.current_build_dir(), + 'regress': { + 'sql': [ + 'pg_prewarm', + ], + }, 'tap': { 'tests': [ 't/001_basic.pl', diff --git a/contrib/pg_prewarm/pg_prewarm.c b/contrib/pg_prewarm/pg_prewarm.c index 50808569bd7..b968933ea8b 100644 --- a/contrib/pg_prewarm/pg_prewarm.c +++ b/contrib/pg_prewarm/pg_prewarm.c @@ -112,6 +112,14 @@ pg_prewarm(PG_FUNCTION_ARGS) if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind), get_rel_name(relOid)); + /* Check that the relation has storage. */ + if (!RELKIND_HAS_STORAGE(rel->rd_rel->relkind)) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("relation \"%s\" does not have storage", + RelationGetRelationName(rel)), + errdetail_relkind_not_supported(rel->rd_rel->relkind))); + /* Check that the fork exists. 
*/ if (!smgrexists(RelationGetSmgr(rel), forkNumber)) ereport(ERROR, diff --git a/contrib/pg_prewarm/sql/pg_prewarm.sql b/contrib/pg_prewarm/sql/pg_prewarm.sql new file mode 100644 index 00000000000..c76f2c79164 --- /dev/null +++ b/contrib/pg_prewarm/sql/pg_prewarm.sql @@ -0,0 +1,10 @@ +-- Test pg_prewarm extension +CREATE EXTENSION pg_prewarm; + +-- pg_prewarm() should fail if the target relation has no storage. +CREATE TABLE test (c1 int) PARTITION BY RANGE (c1); +SELECT pg_prewarm('test', 'buffer'); + +-- Cleanup +DROP TABLE test; +DROP EXTENSION pg_prewarm; diff --git a/contrib/pg_stat_statements/expected/extended.out b/contrib/pg_stat_statements/expected/extended.out index 04a05943372..7da308ba84f 100644 --- a/contrib/pg_stat_statements/expected/extended.out +++ b/contrib/pg_stat_statements/expected/extended.out @@ -68,3 +68,61 @@ SELECT calls, rows, query FROM pg_stat_statements ORDER BY query COLLATE "C"; 1 | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t (4 rows) +-- Various parameter numbering patterns +SELECT pg_stat_statements_reset() IS NOT NULL AS t; + t +--- + t +(1 row) + +-- Unique query IDs with parameter numbers switched. +SELECT WHERE ($1::int, 7) IN ((8, $2::int), ($3::int, 9)) \bind '1' '2' '3' \g +-- +(0 rows) + +SELECT WHERE ($2::int, 10) IN ((11, $3::int), ($1::int, 12)) \bind '1' '2' '3' \g +-- +(0 rows) + +SELECT WHERE $1::int IN ($2::int, $3::int) \bind '1' '2' '3' \g +-- +(0 rows) + +SELECT WHERE $2::int IN ($3::int, $1::int) \bind '1' '2' '3' \g +-- +(0 rows) + +SELECT WHERE $3::int IN ($1::int, $2::int) \bind '1' '2' '3' \g +-- +(0 rows) + +-- Two groups of two queries with the same query ID. +SELECT WHERE '1'::int IN ($1::int, '2'::int) \bind '1' \g +-- +(1 row) + +SELECT WHERE '4'::int IN ($1::int, '5'::int) \bind '2' \g +-- +(0 rows) + +SELECT WHERE $2::int IN ($1::int, '1'::int) \bind '1' '2' \g +-- +(0 rows) + +SELECT WHERE $2::int IN ($1::int, '2'::int) \bind '3' '4' \g +-- +(0 rows) + +SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C"; + query | calls +--------------------------------------------------------------+------- + SELECT WHERE $1::int IN ($2::int, $3::int) | 1 + SELECT WHERE $2::int IN ($1::int, $3::int) | 2 + SELECT WHERE $2::int IN ($1::int, $3::int) | 2 + SELECT WHERE $2::int IN ($3::int, $1::int) | 1 + SELECT WHERE $3::int IN ($1::int, $2::int) | 1 + SELECT WHERE ($1::int, $4) IN (($5, $2::int), ($3::int, $6)) | 1 + SELECT WHERE ($2::int, $4) IN (($5, $3::int), ($1::int, $6)) | 1 + SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1 +(8 rows) + diff --git a/contrib/pg_stat_statements/expected/select.out b/contrib/pg_stat_statements/expected/select.out index 09476a7b699..038ae110364 100644 --- a/contrib/pg_stat_statements/expected/select.out +++ b/contrib/pg_stat_statements/expected/select.out @@ -238,6 +238,35 @@ SELECT pg_stat_statements_reset() IS NOT NULL AS t; t (1 row) +-- normalization of constants and parameters, with constant locations +-- recorded one or more times. 
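The new regression test covers only the error path; a sketch of both sides of the storage check (the leaf partition test_1 is an illustrative addition, not part of the test):

    CREATE EXTENSION pg_prewarm;
    CREATE TABLE test (c1 int) PARTITION BY RANGE (c1);
    CREATE TABLE test_1 PARTITION OF test FOR VALUES FROM (0) TO (100);
    SELECT pg_prewarm('test', 'buffer');    -- fails: the partitioned parent has no storage
    SELECT pg_prewarm('test_1', 'buffer');  -- fine: the leaf partition has storage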
diff --git a/contrib/pg_stat_statements/expected/extended.out b/contrib/pg_stat_statements/expected/extended.out
index 04a05943372..7da308ba84f 100644
--- a/contrib/pg_stat_statements/expected/extended.out
+++ b/contrib/pg_stat_statements/expected/extended.out
@@ -68,3 +68,61 @@ SELECT calls, rows, query FROM pg_stat_statements ORDER BY query COLLATE "C";
      1 |    1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
 (4 rows)
 
+-- Various parameter numbering patterns
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+-- Unique query IDs with parameter numbers switched.
+SELECT WHERE ($1::int, 7) IN ((8, $2::int), ($3::int, 9)) \bind '1' '2' '3' \g
+--
+(0 rows)
+
+SELECT WHERE ($2::int, 10) IN ((11, $3::int), ($1::int, 12)) \bind '1' '2' '3' \g
+--
+(0 rows)
+
+SELECT WHERE $1::int IN ($2::int, $3::int) \bind '1' '2' '3' \g
+--
+(0 rows)
+
+SELECT WHERE $2::int IN ($3::int, $1::int) \bind '1' '2' '3' \g
+--
+(0 rows)
+
+SELECT WHERE $3::int IN ($1::int, $2::int) \bind '1' '2' '3' \g
+--
+(0 rows)
+
+-- Two groups of two queries with the same query ID.
+SELECT WHERE '1'::int IN ($1::int, '2'::int) \bind '1' \g
+--
+(1 row)
+
+SELECT WHERE '4'::int IN ($1::int, '5'::int) \bind '2' \g
+--
+(0 rows)
+
+SELECT WHERE $2::int IN ($1::int, '1'::int) \bind '1' '2' \g
+--
+(0 rows)
+
+SELECT WHERE $2::int IN ($1::int, '2'::int) \bind '3' '4' \g
+--
+(0 rows)
+
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+                            query                             | calls
+--------------------------------------------------------------+-------
+ SELECT WHERE $1::int IN ($2::int, $3::int)                   |     1
+ SELECT WHERE $2::int IN ($1::int, $3::int)                   |     2
+ SELECT WHERE $2::int IN ($1::int, $3::int)                   |     2
+ SELECT WHERE $2::int IN ($3::int, $1::int)                   |     1
+ SELECT WHERE $3::int IN ($1::int, $2::int)                   |     1
+ SELECT WHERE ($1::int, $4) IN (($5, $2::int), ($3::int, $6)) |     1
+ SELECT WHERE ($2::int, $4) IN (($5, $3::int), ($1::int, $6)) |     1
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t           |     1
+(8 rows)
+
diff --git a/contrib/pg_stat_statements/expected/select.out b/contrib/pg_stat_statements/expected/select.out
index 09476a7b699..038ae110364 100644
--- a/contrib/pg_stat_statements/expected/select.out
+++ b/contrib/pg_stat_statements/expected/select.out
@@ -238,6 +238,35 @@ SELECT pg_stat_statements_reset() IS NOT NULL AS t;
  t
 (1 row)
 
+-- normalization of constants and parameters, with constant locations
+-- recorded one or more times.
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+SELECT WHERE '1' IN ('1'::int, '3'::int::text);
+--
+(1 row)
+
+SELECT WHERE (1, 2) IN ((1, 2), (2, 3));
+--
+(1 row)
+
+SELECT WHERE (3, 4) IN ((5, 6), (8, 7));
+--
+(0 rows)
+
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+                                  query                                  | calls
+-------------------------------------------------------------------------+-------
+ SELECT WHERE $1 IN ($2::int, $3::int::text)                             |     1
+ SELECT WHERE ($1, $2) IN (($3, $4), ($5, $6))                           |     2
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t                      |     1
+ SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C"  |     0
+(4 rows)
+
 --
 -- queries with locking clauses
 --
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index d8fdf42df79..c58f34e9f30 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -2818,9 +2818,7 @@ generate_normalized_query(JumbleState *jstate, const char *query,
                 last_off = 0,   /* Offset from start for previous tok */
                 last_tok_len = 0;   /* Length (in bytes) of that tok */
     bool        in_squashed = false;    /* in a run of squashed consts? */
-    int         skipped_constants = 0;  /* Position adjustment of later
-                                         * constants after squashed ones */
-
+    int         num_constants_replaced = 0;
 
     /*
      * Get constants' lengths (core system only gives us locations).  Note
@@ -2878,7 +2876,7 @@ generate_normalized_query(JumbleState *jstate, const char *query,
 
             /* ... and then a param symbol replacing the constant itself */
             n_quer_loc += sprintf(norm_query + n_quer_loc, "$%d",
-                                  i + 1 + jstate->highest_extern_param_id - skipped_constants);
+                                  num_constants_replaced++ + 1 + jstate->highest_extern_param_id);
 
             /* In case previous constants were merged away, stop doing that */
             in_squashed = false;
@@ -2902,12 +2900,10 @@ generate_normalized_query(JumbleState *jstate, const char *query,
 
             /* ... and then start a run of squashed constants */
             n_quer_loc += sprintf(norm_query + n_quer_loc, "$%d /*, ... */",
-                                  i + 1 + jstate->highest_extern_param_id - skipped_constants);
+                                  num_constants_replaced++ + 1 + jstate->highest_extern_param_id);
 
             /* The next location will match the block below, to end the run */
             in_squashed = true;
-
-            skipped_constants++;
         }
         else
         {
diff --git a/contrib/pg_stat_statements/sql/extended.sql b/contrib/pg_stat_statements/sql/extended.sql
index 1af0711020c..a366658a53a 100644
--- a/contrib/pg_stat_statements/sql/extended.sql
+++ b/contrib/pg_stat_statements/sql/extended.sql
@@ -19,3 +19,19 @@ SELECT $1 \bind 'unnamed_val1' \g
 \bind_named stmt1 'stmt1_val1' \g
 
 SELECT calls, rows, query FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+-- Various parameter numbering patterns
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+-- Unique query IDs with parameter numbers switched.
+SELECT WHERE ($1::int, 7) IN ((8, $2::int), ($3::int, 9)) \bind '1' '2' '3' \g
+SELECT WHERE ($2::int, 10) IN ((11, $3::int), ($1::int, 12)) \bind '1' '2' '3' \g
+SELECT WHERE $1::int IN ($2::int, $3::int) \bind '1' '2' '3' \g
+SELECT WHERE $2::int IN ($3::int, $1::int) \bind '1' '2' '3' \g
+SELECT WHERE $3::int IN ($1::int, $2::int) \bind '1' '2' '3' \g
+-- Two groups of two queries with the same query ID.
+SELECT WHERE '1'::int IN ($1::int, '2'::int) \bind '1' \g
+SELECT WHERE '4'::int IN ($1::int, '5'::int) \bind '2' \g
+SELECT WHERE $2::int IN ($1::int, '1'::int) \bind '1' '2' \g
+SELECT WHERE $2::int IN ($1::int, '2'::int) \bind '3' '4' \g
+
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
diff --git a/contrib/pg_stat_statements/sql/select.sql b/contrib/pg_stat_statements/sql/select.sql
index c5e0b84ee5b..189d405512f 100644
--- a/contrib/pg_stat_statements/sql/select.sql
+++ b/contrib/pg_stat_statements/sql/select.sql
@@ -79,6 +79,14 @@ DEALLOCATE pgss_test;
 SELECT calls, rows, query FROM pg_stat_statements ORDER BY query COLLATE "C";
 SELECT pg_stat_statements_reset() IS NOT NULL AS t;
 
+-- normalization of constants and parameters, with constant locations
+-- recorded one or more times.
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+SELECT WHERE '1' IN ('1'::int, '3'::int::text);
+SELECT WHERE (1, 2) IN ((1, 2), (2, 3));
+SELECT WHERE (3, 4) IN ((5, 6), (8, 7));
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+
 --
 -- queries with locking clauses
 --
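The numbering rule is easiest to see in one of the new cases: external parameters keep their numbers, and each replaced constant gets the next number after highest_extern_param_id, counted in replacement order via num_constants_replaced (psql \bind syntax as in the tests above):

    SELECT WHERE '4'::int IN ($1::int, '5'::int) \bind '2' \g
    -- normalizes to:  SELECT WHERE $2::int IN ($1::int, $3::int)
    -- '4' becomes $2 and '5' becomes $3; $1 is untouched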
diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index ca2a567b2b1..f4a0191c55b 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -7527,12 +7527,12 @@ local0.*    /var/log/postgresql
        <entry><literal>setup_durations</literal></entry>
        <entry>
         Logs the time spent establishing the connection and setting up the
-        backend at the time the connection is ready to execute its first
-        query. The log message includes the total setup duration, starting
-        from the postmaster accepting the incoming connection and ending
-        when the connection is ready for query. It also includes the time
-        it took to fork the new backend and the time it took to
-        authenticate the user.
+        backend until the connection is ready to execute its first
+        query. The log message includes three durations: the total
+        setup duration (starting from the postmaster accepting the
+        incoming connection and ending when the connection is ready
+        for query), the time it took to fork the new backend, and
+        the time it took to authenticate the user.
        </entry>
       </row>
diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml
index c10bca63e55..d7595a7e546 100644
--- a/doc/src/sgml/ref/pg_dump.sgml
+++ b/doc/src/sgml/ref/pg_dump.sgml
@@ -1134,7 +1134,7 @@ PostgreSQL documentation
       <term><option>--no-statistics</option></term>
       <listitem>
        <para>
-        Do not dump statistics.
+        Do not dump statistics. This is the default.
        </para>
       </listitem>
      </varlistentry>
@@ -1461,7 +1461,7 @@ PostgreSQL documentation
       <term><option>--with-statistics</option></term>
       <listitem>
        <para>
-        Dump statistics. This is the default.
+        Dump statistics.
        </para>
       </listitem>
      </varlistentry>
@@ -1681,14 +1681,14 @@ CREATE DATABASE foo WITH TEMPLATE template0;
   </para>
 
   <para>
-   By default, <command>pg_dump</command> will include most optimizer
-   statistics in the resulting dump file. However, some statistics may not be
-   included, such as those created explicitly with <xref
-   linkend="sql-createstatistics"/> or custom statistics added by an
-   extension. Therefore, it may be useful to run <command>ANALYZE</command>
-   after restoring from a dump file to ensure optimal performance; see <xref
-   linkend="vacuum-for-statistics"/> and <xref linkend="autovacuum"/> for more
-   information.
+   If <option>--with-statistics</option> is specified,
+   <command>pg_dump</command> will include most optimizer statistics in the
+   resulting dump file. However, some statistics may not be included, such as
+   those created explicitly with <xref linkend="sql-createstatistics"/> or
+   custom statistics added by an extension. Therefore, it may be useful to
+   run <command>ANALYZE</command> after restoring from a dump file to ensure
+   optimal performance; see <xref linkend="vacuum-for-statistics"/> and <xref
+   linkend="autovacuum"/> for more information.
   </para>
 
   <para>
diff --git a/doc/src/sgml/ref/pg_dumpall.sgml b/doc/src/sgml/ref/pg_dumpall.sgml
index 8c5141d036c..723a466cfaa 100644
--- a/doc/src/sgml/ref/pg_dumpall.sgml
+++ b/doc/src/sgml/ref/pg_dumpall.sgml
@@ -567,7 +567,7 @@ exclude database <replaceable class="parameter">PATTERN</replaceable>
       <term><option>--no-statistics</option></term>
       <listitem>
        <para>
-        Do not dump statistics.
+        Do not dump statistics. This is the default.
        </para>
       </listitem>
      </varlistentry>
@@ -741,7 +741,7 @@ exclude database <replaceable class="parameter">PATTERN</replaceable>
       <term><option>--with-statistics</option></term>
       <listitem>
        <para>
-        Dump statistics. This is the default.
+        Dump statistics.
        </para>
       </listitem>
      </varlistentry>
@@ -957,14 +957,14 @@ exclude database <replaceable class="parameter">PATTERN</replaceable>
   </para>
 
  <para>
-   By default, <command>pg_dumpall</command> will include most optimizer
-   statistics in the resulting dump file. However, some statistics may not be
-   included, such as those created explicitly with <xref
-   linkend="sql-createstatistics"/> or custom statistics added by an
-   extension. Therefore, it may be useful to run <command>ANALYZE</command>
-   on each database after restoring from a dump file to ensure optimal
-   performance. You can also run <command>vacuumdb -a -z</command> to analyze
-   all databases.
+   If <option>--with-statistics</option> is specified,
+   <command>pg_dumpall</command> will include most optimizer statistics in the
+   resulting dump file. However, some statistics may not be included, such as
+   those created explicitly with <xref linkend="sql-createstatistics"/> or
+   custom statistics added by an extension. Therefore, it may be useful to
+   run <command>ANALYZE</command> on each database after restoring from a dump
+   file to ensure optimal performance. You can also run <command>vacuumdb -a
+   -z</command> to analyze all databases.
   </para>
 
  <para>
diff --git a/doc/src/sgml/release-18.sgml b/doc/src/sgml/release-18.sgml
index 2ae03065f94..3315ea52def 100644
--- a/doc/src/sgml/release-18.sgml
+++ b/doc/src/sgml/release-18.sgml
@@ -82,7 +82,8 @@ Deprecate MD5 password authentication (Nathan Bossart)
 </para>
 
 <para>
-Warnings generated by their use can be disabled by the server variable md5_password_warnings.
+Support for MD5 passwords will be removed in a future major version release. CREATE ROLE and ALTER ROLE now emit deprecation warnings when setting MD5 passwords.
+These warnings can be disabled by setting the md5_password_warnings parameter to "off".
 </para>
 </listitem>
 
@@ -568,6 +569,7 @@ Add an asynchronous I/O subsystem (Andres Freund, Thomas Munro, Nazir Bilal Yavu
 </para>
 
 <para>
+This feature allows backends to queue multiple read requests, which allows for more efficient sequential scans, bitmap heap scans, vacuums, etc.
 This is enabled by server variable io_method, with server variables io_combine_limit and io_max_combine_limit added to control it.  This also enables effective_io_concurrency and maintenance_io_concurrency values greater than zero for systems without fadvise() support.
 The new system view pg_aios shows the file handles being used for asynchronous I/O.
 </para>
@@ -681,15 +683,12 @@ This more accurately reflects modern hardware.
 <!--
 Author: Melanie Plageman <melanieplageman@gmail.com>
 2025-03-12 [9219093ca] Modularize log_connections output
-Author: Melanie Plageman <melanieplageman@gmail.com>
-2025-03-12 [18cd15e70] Add connection establishment duration logging
-->
 
 <listitem>
 <para>
 Increase the logging granularity of server variable log_connections (Melanie Plageman)
 <ulink url="&commit_baseurl;9219093ca">§</ulink>
-<ulink url="&commit_baseurl;18cd15e70">§</ulink>
 </para>
 
 <para>
@@ -698,6 +697,18 @@ This server variable was previously only boolean; these options are still suppo
 </listitem>
 
 <!--
+Author: Melanie Plageman <melanieplageman@gmail.com>
+2025-03-12 [18cd15e70] Add connection establishment duration logging
+-->
+
+<listitem>
+<para>
+Add log_connections option to report the duration of connection stages (Melanie Plageman)
+<ulink url="&commit_baseurl;18cd15e70">§</ulink>
+</para>
+</listitem>
+
+<!--
 Author: Tom Lane <tgl@sss.pgh.pa.us>
 2025-04-07 [3516ea768] Add local-address escape "%L" to log_line_prefix.
-->
@@ -762,6 +773,33 @@ mode; tracking must be enabled with the server variable track_cost_delay_timing.
 </listitem>
 
 <!--
+Author: Masahiko Sawada <msawada@postgresql.org>
+2024-08-13 [4c1b4cdb8] Add resource statistics reporting to ANALYZE VERBOSE.
+Author: Masahiko Sawada <msawada@postgresql.org>
+2024-09-09 [bb7775234] Add WAL usage reporting to ANALYZE VERBOSE output.
+-->
+
+<listitem>
+<para>
+Add WAL, CPU, and average read statistics output to ANALYZE VERBOSE (Anthonin Bonnefoy)
+<ulink url="&commit_baseurl;4c1b4cdb8">§</ulink>
+<ulink url="&commit_baseurl;bb7775234">§</ulink>
+</para>
+</listitem>
+
+<!--
+Author: Michael Paquier <michael@paquier.xyz>
+2025-02-17 [6a8a7ce47] Add information about WAL buffers full to VACUUM/ANALYZE
+-->
+
+<listitem>
+<para>
+Add full WAL buffer count to VACUUM/ANALYZE (VERBOSE) and autovacuum log output (Bertrand Drouvot)
+<ulink url="&commit_baseurl;6a8a7ce47">§</ulink>
+</para>
+</listitem>
+
+<!--
 Author: Michael Paquier <michael@paquier.xyz>
 2024-12-19 [9aea73fc6] Add backend-level statistics to pgstats
 Author: Michael Paquier <michael@paquier.xyz>
@@ -1740,32 +1778,14 @@ Automatically include BUFFERS output in EXPLAIN ANALYZE (Guillaume Lelarge, Davi
 </listitem>
 
 <!--
-Author: Masahiko Sawada <msawada@postgresql.org>
-2024-08-13 [4c1b4cdb8] Add resource statistics reporting to ANALYZE VERBOSE.
-Author: Masahiko Sawada <msawada@postgresql.org>
-2024-09-09 [bb7775234] Add WAL usage reporting to ANALYZE VERBOSE output.
--->
-
-<listitem>
-<para>
-Add WAL, CPU, and average read statistics output to EXPLAIN ANALYZE VERBOSE (Anthonin Bonnefoy)
-<ulink url="&commit_baseurl;4c1b4cdb8">§</ulink>
-<ulink url="&commit_baseurl;bb7775234">§</ulink>
-</para>
-</listitem>
-
-<!--
 Author: Michael Paquier <michael@paquier.xyz>
 2025-02-17 [320545bfc] Add information about WAL buffers being full to EXPLAIN
-Author: Michael Paquier <michael@paquier.xyz>
-2025-02-17 [6a8a7ce47] Add information about WAL buffers full to VACUUM/ANALYZE
-->
 
 <listitem>
 <para>
-Add full WAL buffer count to EXPLAIN (WAL), VACUUM/ANALYZE (VERBOSE), and autovacuum log output (Bertrand Drouvot)
+Add full WAL buffer count to EXPLAIN (WAL) output (Bertrand Drouvot)
 <ulink url="&commit_baseurl;320545bfc">§</ulink>
-<ulink url="&commit_baseurl;6a8a7ce47">§</ulink>
 </para>
 </listitem>
 
@@ -2745,7 +2765,7 @@ This is to handle cases where a pre-Postgres 18 cluster's default CPU signedness
  </sect4>
 
   <sect4 id="release-18-logicalrep-app">
-   <title>Logical Replication Applications></title>
+   <title>Logical Replication Applications</title>
 
    <itemizedlist>
 
@@ -2904,13 +2924,22 @@ Add ARM Neon and SVE CPU intrinsics for popcount (integer bit counting) (Chiranm
 
 <!--
 Author: Dean Rasheed <dean.a.rasheed@gmail.com>
+2024-07-09 [ca481d3c9] Optimise numeric multiplication for short inputs.
+Author: Dean Rasheed <dean.a.rasheed@gmail.com>
+2024-08-15 [c4e44224c] Extend mul_var_short() to 5 and 6-digit inputs.
+Author: Dean Rasheed <dean.a.rasheed@gmail.com>
 2024-08-15 [8dc28d7eb] Optimise numeric multiplication using base-NBASE^2 arith
+Author: Dean Rasheed <dean.a.rasheed@gmail.com>
+2024-10-04 [9428c001f] Speed up numeric division by always using the "fast" alg
-->
 
 <listitem>
 <para>
-Improve the speed of multiplication (Joel Jacobson, Dean Rasheed)
+Improve the speed of numeric multiplication and division (Joel Jacobson, Dean Rasheed)
+<ulink url="&commit_baseurl;ca481d3c9">§</ulink>
+<ulink url="&commit_baseurl;c4e44224c">§</ulink>
 <ulink url="&commit_baseurl;8dc28d7eb">§</ulink>
+<ulink url="&commit_baseurl;9428c001f">§</ulink>
 </para>
 </listitem>
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index f28326bad09..708674d8fcf 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -1413,12 +1413,26 @@ lazy_scan_heap(LVRelState *vacrel)
 
             if (vm_page_frozen)
             {
-                Assert(vacrel->eager_scan_remaining_successes > 0);
-                vacrel->eager_scan_remaining_successes--;
+                if (vacrel->eager_scan_remaining_successes > 0)
+                    vacrel->eager_scan_remaining_successes--;
 
                 if (vacrel->eager_scan_remaining_successes == 0)
                 {
                     /*
+                     * Report only once that we disabled eager scanning. We
+                     * may eagerly read ahead blocks in excess of the success
+                     * or failure caps before attempting to freeze them, so we
+                     * could reach here even after disabling additional eager
+                     * scanning.
+                     */
+                    if (vacrel->eager_scan_max_fails_per_region > 0)
+                        ereport(vacrel->verbose ? INFO : DEBUG2,
+                                (errmsg("disabling eager scanning after freezing %u eagerly scanned blocks of \"%s.%s.%s\"",
+                                        orig_eager_scan_success_limit,
+                                        vacrel->dbname, vacrel->relnamespace,
+                                        vacrel->relname)));
+
+                    /*
                      * If we hit our success cap, permanently disable eager
                      * scanning by setting the other eager scan management
                      * fields to their disabled values.
@@ -1426,19 +1440,10 @@ lazy_scan_heap(LVRelState *vacrel)
                      */
                     vacrel->eager_scan_remaining_fails = 0;
                     vacrel->next_eager_scan_region_start = InvalidBlockNumber;
                     vacrel->eager_scan_max_fails_per_region = 0;
-
-                    ereport(vacrel->verbose ? INFO : DEBUG2,
-                            (errmsg("disabling eager scanning after freezing %u eagerly scanned blocks of \"%s.%s.%s\"",
-                                    orig_eager_scan_success_limit,
-                                    vacrel->dbname, vacrel->relnamespace,
-                                    vacrel->relname)));
                 }
             }
-            else
-            {
-                Assert(vacrel->eager_scan_remaining_fails > 0);
+            else if (vacrel->eager_scan_remaining_fails > 0)
                 vacrel->eager_scan_remaining_fails--;
-            }
         }
 
     /*
diff --git a/src/backend/utils/activity/wait_event_names.txt b/src/backend/utils/activity/wait_event_names.txt
index 5d9e04d6823..4da68312b5f 100644
--- a/src/backend/utils/activity/wait_event_names.txt
+++ b/src/backend/utils/activity/wait_event_names.txt
@@ -401,6 +401,7 @@ SerialSLRU	"Waiting to access the serializable transaction conflict SLRU cache."
 SubtransSLRU	"Waiting to access the sub-transaction SLRU cache."
 XactSLRU	"Waiting to access the transaction status SLRU cache."
 ParallelVacuumDSA	"Waiting for parallel vacuum dynamic shared memory allocation."
+AioUringCompletion	"Waiting for another process to complete IO via io_uring."
 
 # No "ABI_compatibility" region here as WaitEventLWLock has its own C code.
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index 793d8a9adcc..680fee2a844 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -702,9 +702,18 @@ ParseFraction(char *cp, double *frac)
     }
     else
     {
+        /*
+         * On the other hand, let's reject anything that's not digits after
+         * the ".".  strtod is happy with input like ".123e9", but that'd
+         * break callers' expectation that the result is in 0..1.  (It's quite
+         * difficult to get here with such input, but not impossible.)
+         */
+        if (strspn(cp + 1, "0123456789") != strlen(cp + 1))
+            return DTERR_BAD_FORMAT;
+
         errno = 0;
         *frac = strtod(cp, &cp);
-        /* check for parse failure */
+        /* check for parse failure (probably redundant given prior check) */
         if (*cp != '\0' || errno != 0)
             return DTERR_BAD_FORMAT;
     }
@@ -2959,30 +2968,27 @@ DecodeNumberField(int len, char *str, int fmask,
     char       *cp;
 
     /*
+     * This function was originally meant to cope only with DTK_NUMBER fields,
+     * but we now sometimes abuse it to parse (parts of) DTK_DATE fields,
+     * which can contain letters and other punctuation.  Reject if it's not a
+     * valid DTK_NUMBER, that is digits and decimal point(s).  (ParseFraction
+     * will reject if there's more than one decimal point.)
+     */
+    if (strspn(str, "0123456789.") != len)
+        return DTERR_BAD_FORMAT;
+
+    /*
      * Have a decimal point? Then this is a date or something with a seconds
      * field...
      */
     if ((cp = strchr(str, '.')) != NULL)
     {
-        /*
-         * Can we use ParseFractionalSecond here?  Not clear whether trailing
-         * junk should be rejected ...
-         */
-        if (cp[1] == '\0')
-        {
-            /* avoid assuming that strtod will accept "." */
-            *fsec = 0;
-        }
-        else
-        {
-            double      frac;
+        int         dterr;
 
-            errno = 0;
-            frac = strtod(cp, NULL);
-            if (errno != 0)
-                return DTERR_BAD_FORMAT;
-            *fsec = rint(frac * 1000000);
-        }
+        /* Convert the fraction and store at *fsec */
+        dterr = ParseFractionalSecond(cp, fsec);
+        if (dterr)
+            return dterr;
+
         /* Now truncate off the fraction for further processing */
         *cp = '\0';
         len = strlen(str);
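In SQL terms, the stricter ParseFraction/DecodeNumberField checks reject exponent-style and otherwise malformed "fractions" while leaving ordinary fractional seconds alone (a sketch; the second statement mirrors the new horology tests later in this patch):

    SELECT time '12:34:56.789';                                    -- still accepted
    SELECT timestamp with time zone 'J2452271 T X03456.001e6-08';  -- now rejected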
diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c
index edee1f7880b..6e2864cbbda 100644
--- a/src/backend/utils/adt/regexp.c
+++ b/src/backend/utils/adt/regexp.c
@@ -773,8 +773,11 @@ similar_escape_internal(text *pat_text, text *esc_text)
     int         plen,
                 elen;
     bool        afterescape = false;
-    bool        incharclass = false;
     int         nquotes = 0;
+    int         charclass_depth = 0;    /* Nesting level of character classes,
+                                         * encompassed by square brackets */
+    int         charclass_start = 0;    /* State of the character class start,
+                                         * for carets */
 
     p = VARDATA_ANY(pat_text);
     plen = VARSIZE_ANY_EXHDR(pat_text);
@@ -904,7 +907,7 @@ similar_escape_internal(text *pat_text, text *esc_text)
         /* fast path */
         if (afterescape)
         {
-            if (pchar == '"' && !incharclass)   /* escape-double-quote? */
+            if (pchar == '"' && charclass_depth < 1)    /* escape-double-quote? */
             {
                 /* emit appropriate part separator, per notes above */
                 if (nquotes == 0)
@@ -953,18 +956,41 @@ similar_escape_internal(text *pat_text, text *esc_text)
             /* SQL escape character; do not send to output */
             afterescape = true;
         }
-        else if (incharclass)
+        else if (charclass_depth > 0)
         {
             if (pchar == '\\')
                 *r++ = '\\';
             *r++ = pchar;
-            if (pchar == ']')
-                incharclass = false;
+
+            /*
+             * Ignore a closing bracket at the start of a character class.
+             * Such a bracket is taken literally rather than closing the
+             * class.  "charclass_start" is 1 right at the beginning of a
+             * class and 2 after an initial caret.
+             */
+            if (pchar == ']' && charclass_start > 2)
+                charclass_depth--;
+            else if (pchar == '[')
+                charclass_depth++;
+
+            /*
+             * If there is a caret right after the opening bracket, it negates
+             * the character class, but a following closing bracket should
+             * still be treated as a normal character.  That holds only for
+             * the first caret, so only the values 1 and 2 mean that closing
+             * brackets should be taken literally.
+             */
+            if (pchar == '^')
+                charclass_start++;
+            else
+                charclass_start = 3;    /* definitely past the start */
         }
         else if (pchar == '[')
         {
+            /* start of a character class */
             *r++ = pchar;
-            incharclass = true;
+            charclass_depth++;
+            charclass_start = 1;
         }
         else if (pchar == '%')
         {
diff --git a/src/backend/utils/cache/funccache.c b/src/backend/utils/cache/funccache.c
index 150c502a612..afc048a051e 100644
--- a/src/backend/utils/cache/funccache.c
+++ b/src/backend/utils/cache/funccache.c
@@ -491,6 +491,7 @@ cached_function_compile(FunctionCallInfo fcinfo,
     CachedFunctionHashKey hashkey;
     bool        function_valid = false;
     bool        hashkey_valid = false;
+    bool        new_function = false;
 
     /*
      * Lookup the pg_proc tuple by Oid; we'll need it in any case
@@ -570,13 +571,15 @@ recheck:
 
         /*
          * Create the new function struct, if not done already.  The function
-         * structs are never thrown away, so keep them in TopMemoryContext.
+         * cache entry will be kept for the life of the backend, so put it in
+         * TopMemoryContext.
          */
         Assert(cacheEntrySize >= sizeof(CachedFunction));
         if (function == NULL)
         {
             function = (CachedFunction *)
                 MemoryContextAllocZero(TopMemoryContext, cacheEntrySize);
+            new_function = true;
         }
         else
         {
@@ -585,17 +588,36 @@ recheck:
         }
 
         /*
-         * Fill in the CachedFunction part.  fn_hashkey and use_count remain
-         * zeroes for now.
+         * However, if function compilation fails, we'd like not to leak the
+         * function struct, so use a PG_TRY block to prevent that.  (It's up
+         * to the compile callback function to avoid its own internal leakage
+         * in such cases.)  Unfortunately, freeing the struct is only safe if
+         * we just allocated it: otherwise there are probably fn_extra
+         * pointers to it.
          */
-        function->fn_xmin = HeapTupleHeaderGetRawXmin(procTup->t_data);
-        function->fn_tid = procTup->t_self;
-        function->dcallback = dcallback;
+        PG_TRY();
+        {
+            /*
+             * Do the hard, language-specific part.
+             */
+            ccallback(fcinfo, procTup, &hashkey, function, forValidator);
+        }
+        PG_CATCH();
+        {
+            if (new_function)
+                pfree(function);
+            PG_RE_THROW();
+        }
+        PG_END_TRY();
 
         /*
-         * Do the hard, language-specific part.
+         * Fill in the CachedFunction part.  (We do this last to prevent the
+         * function from looking valid before it's fully built.)  fn_hashkey
+         * will be set by cfunc_hashtable_insert; use_count remains zero.
          */
-        ccallback(fcinfo, procTup, &hashkey, function, forValidator);
+        function->fn_xmin = HeapTupleHeaderGetRawXmin(procTup->t_data);
+        function->fn_tid = procTup->t_self;
+        function->dcallback = dcallback;
 
         /*
          * Add the completed struct to the hash table.
diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample
index 63f991c4f93..87ce76b18f4 100644
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -178,13 +178,11 @@
 #temp_file_limit = -1			# limits per-process temp file space
 					# in kilobytes, or -1 for no limit
 
+#file_copy_method = copy		# copy, clone (if supported by OS)
+
 #max_notify_queue_pages = 1048576	# limits the number of SLRU pages allocated
 					# for NOTIFY / LISTEN queue
 
-#file_copy_method = copy	# the default is the first option
-					# copy
-					# clone (if system support is available)
-
 # - Kernel Resources -
 
 #max_files_per_process = 1000		# min 64
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index afa42337b11..175fe9c4273 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -152,7 +152,7 @@ InitDumpOptions(DumpOptions *opts)
     opts->dumpSections = DUMP_UNSECTIONED;
     opts->dumpSchema = true;
     opts->dumpData = true;
-    opts->dumpStatistics = true;
+    opts->dumpStatistics = false;
 }
 
 /*
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index cf34f71ea11..386e21e0c59 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -68,6 +68,7 @@ my %pgdump_runs = (
 			'--no-data',
 			'--sequence-data',
 			'--binary-upgrade',
+			'--with-statistics',
 			'--dbname' => 'postgres',    # alternative way to specify database
 		],
 		restore_cmd => [
@@ -75,6 +76,7 @@ my %pgdump_runs = (
 			'--format' => 'custom',
 			'--verbose',
 			'--file' => "$tempdir/binary_upgrade.sql",
+			'--with-statistics',
 			"$tempdir/binary_upgrade.dump",
 		],
 	},
@@ -88,11 +90,13 @@ my %pgdump_runs = (
 			'--format' => 'custom',
 			'--compress' => '1',
 			'--file' => "$tempdir/compression_gzip_custom.dump",
+			'--with-statistics',
 			'postgres',
 		],
 		restore_cmd => [
 			'pg_restore',
 			'--file' => "$tempdir/compression_gzip_custom.sql",
+			'--with-statistics',
 			"$tempdir/compression_gzip_custom.dump",
 		],
 		command_like => {
@@ -115,6 +119,7 @@ my %pgdump_runs = (
 			'--format' => 'directory',
 			'--compress' => 'gzip:1',
 			'--file' => "$tempdir/compression_gzip_dir",
+			'--with-statistics',
 			'postgres',
 		],
 		# Give coverage for manually compressed blobs.toc files during
@@ -132,6 +137,7 @@ my %pgdump_runs = (
 			'pg_restore',
 			'--jobs' => '2',
 			'--file' => "$tempdir/compression_gzip_dir.sql",
+			'--with-statistics',
 			"$tempdir/compression_gzip_dir",
 		],
 	},
@@ -144,6 +150,7 @@ my %pgdump_runs = (
 			'--format' => 'plain',
 			'--compress' => '1',
 			'--file' => "$tempdir/compression_gzip_plain.sql.gz",
+			'--with-statistics',
 			'postgres',
 		],
 		# Decompress the generated file to run through the tests.
@@ -162,11 +169,13 @@ my %pgdump_runs = (
 			'--format' => 'custom',
 			'--compress' => 'lz4',
 			'--file' => "$tempdir/compression_lz4_custom.dump",
+			'--with-statistics',
 			'postgres',
 		],
 		restore_cmd => [
 			'pg_restore',
 			'--file' => "$tempdir/compression_lz4_custom.sql",
+			'--with-statistics',
 			"$tempdir/compression_lz4_custom.dump",
 		],
 		command_like => {
@@ -189,6 +198,7 @@ my %pgdump_runs = (
 			'--format' => 'directory',
 			'--compress' => 'lz4:1',
 			'--file' => "$tempdir/compression_lz4_dir",
+			'--with-statistics',
 			'postgres',
 		],
 		# Verify that data files were compressed
@@ -200,6 +210,7 @@ my %pgdump_runs = (
 			'pg_restore',
 			'--jobs' => '2',
 			'--file' => "$tempdir/compression_lz4_dir.sql",
+			'--with-statistics',
 			"$tempdir/compression_lz4_dir",
 		],
 	},
@@ -212,6 +223,7 @@ my %pgdump_runs = (
 			'--format' => 'plain',
 			'--compress' => 'lz4',
 			'--file' => "$tempdir/compression_lz4_plain.sql.lz4",
+			'--with-statistics',
 			'postgres',
 		],
 		# Decompress the generated file to run through the tests.
@@ -233,11 +245,13 @@ my %pgdump_runs = (
 			'--format' => 'custom',
 			'--compress' => 'zstd',
 			'--file' => "$tempdir/compression_zstd_custom.dump",
+			'--with-statistics',
 			'postgres',
 		],
 		restore_cmd => [
 			'pg_restore',
 			'--file' => "$tempdir/compression_zstd_custom.sql",
+			'--with-statistics',
 			"$tempdir/compression_zstd_custom.dump",
 		],
 		command_like => {
@@ -259,6 +273,7 @@ my %pgdump_runs = (
 			'--format' => 'directory',
 			'--compress' => 'zstd:1',
 			'--file' => "$tempdir/compression_zstd_dir",
+			'--with-statistics',
 			'postgres',
 		],
 		# Give coverage for manually compressed blobs.toc files during
@@ -279,6 +294,7 @@ my %pgdump_runs = (
 			'pg_restore',
 			'--jobs' => '2',
 			'--file' => "$tempdir/compression_zstd_dir.sql",
+			'--with-statistics',
 			"$tempdir/compression_zstd_dir",
 		],
 	},
@@ -292,6 +308,7 @@ my %pgdump_runs = (
 			'--format' => 'plain',
 			'--compress' => 'zstd:long',
 			'--file' => "$tempdir/compression_zstd_plain.sql.zst",
+			'--with-statistics',
 			'postgres',
 		],
 		# Decompress the generated file to run through the tests.
@@ -310,6 +327,7 @@ my %pgdump_runs = (
 			'pg_dump', '--no-sync',
 			'--file' => "$tempdir/clean.sql",
 			'--clean',
+			'--with-statistics',
 			'--dbname' => 'postgres',    # alternative way to specify database
 		],
 	},
@@ -320,6 +338,7 @@ my %pgdump_runs = (
 			'--clean',
 			'--if-exists',
 			'--encoding' => 'UTF8',    # no-op, just for testing
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -338,6 +357,7 @@ my %pgdump_runs = (
 			'--create',
 			'--no-reconnect',    # no-op, just for testing
 			'--verbose',
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -356,6 +376,7 @@ my %pgdump_runs = (
 		dump_cmd => [
 			'pg_dump', '--no-sync',
 			'--file' => "$tempdir/defaults.sql",
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -364,6 +385,7 @@ my %pgdump_runs = (
 		dump_cmd => [
 			'pg_dump', '--no-sync',
 			'--file' => "$tempdir/defaults_no_public.sql",
+			'--with-statistics',
 			'regress_pg_dump_test',
 		],
 	},
@@ -373,6 +395,7 @@ my %pgdump_runs = (
 			'pg_dump', '--no-sync', '--clean',
 			'--file' => "$tempdir/defaults_no_public_clean.sql",
+			'--with-statistics',
 			'regress_pg_dump_test',
 		],
 	},
@@ -381,6 +404,7 @@ my %pgdump_runs = (
 		dump_cmd => [
 			'pg_dump', '--no-sync',
 			'--file' => "$tempdir/defaults_public_owner.sql",
+			'--with-statistics',
 			'regress_public_owner',
 		],
 	},
@@ -395,12 +419,14 @@ my %pgdump_runs = (
 			'pg_dump',
 			'--format' => 'custom',
 			'--file' => "$tempdir/defaults_custom_format.dump",
+			'--with-statistics',
 			'postgres',
 		],
 		restore_cmd => [
 			'pg_restore',
 			'--format' => 'custom',
 			'--file' => "$tempdir/defaults_custom_format.sql",
+			'--with-statistics',
 			"$tempdir/defaults_custom_format.dump",
 		],
 		command_like => {
@@ -425,12 +451,14 @@ my %pgdump_runs = (
 			'pg_dump',
 			'--format' => 'directory',
 			'--file' => "$tempdir/defaults_dir_format",
+			'--with-statistics',
 			'postgres',
 		],
 		restore_cmd => [
 			'pg_restore',
 			'--format' => 'directory',
 			'--file' => "$tempdir/defaults_dir_format.sql",
+			'--with-statistics',
 			"$tempdir/defaults_dir_format",
 		],
 		command_like => {
@@ -456,11 +484,13 @@ my %pgdump_runs = (
 			'--format' => 'directory',
 			'--jobs' => 2,
 			'--file' => "$tempdir/defaults_parallel",
+			'--with-statistics',
 			'postgres',
 		],
 		restore_cmd => [
 			'pg_restore',
 			'--file' => "$tempdir/defaults_parallel.sql",
+			'--with-statistics',
 			"$tempdir/defaults_parallel",
 		],
 	},
@@ -472,12 +502,14 @@ my %pgdump_runs = (
 			'pg_dump',
 			'--format' => 'tar',
 			'--file' => "$tempdir/defaults_tar_format.tar",
+			'--with-statistics',
 			'postgres',
 		],
 		restore_cmd => [
 			'pg_restore',
 			'--format' => 'tar',
 			'--file' => "$tempdir/defaults_tar_format.sql",
+			'--with-statistics',
 			"$tempdir/defaults_tar_format.tar",
 		],
 	},
@@ -486,6 +518,7 @@ my %pgdump_runs = (
 			'pg_dump', '--no-sync',
 			'--file' => "$tempdir/exclude_dump_test_schema.sql",
 			'--exclude-schema' => 'dump_test',
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -494,6 +527,7 @@ my %pgdump_runs = (
 			'pg_dump', '--no-sync',
 			'--file' => "$tempdir/exclude_test_table.sql",
 			'--exclude-table' => 'dump_test.test_table',
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -502,6 +536,7 @@ my %pgdump_runs = (
 			'pg_dump', '--no-sync',
 			'--file' => "$tempdir/exclude_measurement.sql",
 			'--exclude-table-and-children' => 'dump_test.measurement',
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -511,6 +546,7 @@ my %pgdump_runs = (
 			'--file' => "$tempdir/exclude_measurement_data.sql",
 			'--exclude-table-data-and-children' => 'dump_test.measurement',
 			'--no-unlogged-table-data',
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -520,6 +556,7 @@ my %pgdump_runs = (
 			'--file' => "$tempdir/exclude_test_table_data.sql",
 			'--exclude-table-data' => 'dump_test.test_table',
 			'--no-unlogged-table-data',
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -538,6 +575,7 @@ my %pgdump_runs = (
 			'--file' => "$tempdir/pg_dumpall_globals.sql",
 			'--globals-only',
 			'--no-sync',
+			'--with-statistics',
 		],
 	},
 	pg_dumpall_globals_clean => {
@@ -547,12 +585,14 @@ my %pgdump_runs = (
 			'--globals-only',
 			'--clean',
 			'--no-sync',
+			'--with-statistics',
 		],
 	},
 	pg_dumpall_dbprivs => {
 		dump_cmd => [
 			'pg_dumpall', '--no-sync',
 			'--file' => "$tempdir/pg_dumpall_dbprivs.sql",
+			'--with-statistics',
 		],
 	},
 	pg_dumpall_exclude => {
@@ -562,6 +602,7 @@ my %pgdump_runs = (
 			'--file' => "$tempdir/pg_dumpall_exclude.sql",
 			'--exclude-database' => '*dump_test*',
 			'--no-sync',
+			'--with-statistics',
 		],
 	},
 	no_toast_compression => {
@@ -569,6 +610,7 @@ my %pgdump_runs = (
 			'pg_dump', '--no-sync',
 			'--file' => "$tempdir/no_toast_compression.sql",
 			'--no-toast-compression',
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -577,6 +619,7 @@ my %pgdump_runs = (
 			'pg_dump', '--no-sync',
 			'--file' => "$tempdir/no_large_objects.sql",
 			'--no-large-objects',
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -585,6 +628,7 @@ my %pgdump_runs = (
 			'pg_dump', '--no-sync',
 			'--file' => "$tempdir/no_policies.sql",
 			'--no-policies',
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -593,6 +637,7 @@ my %pgdump_runs = (
 			'pg_dump', '--no-sync',
 			'--file' => "$tempdir/no_privs.sql",
 			'--no-privileges',
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -601,6 +646,7 @@ my %pgdump_runs = (
 			'pg_dump', '--no-sync',
 			'--file' => "$tempdir/no_owner.sql",
 			'--no-owner',
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -609,6 +655,7 @@ my %pgdump_runs = (
 			'pg_dump', '--no-sync',
 			'--file' => "$tempdir/no_table_access_method.sql",
 			'--no-table-access-method',
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -617,6 +664,7 @@ my %pgdump_runs = (
 			'pg_dump', '--no-sync',
 			'--file' => "$tempdir/only_dump_test_schema.sql",
 			'--schema' => 'dump_test',
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -627,6 +675,7 @@ my %pgdump_runs = (
 			'--table' => 'dump_test.test_table',
 			'--lock-wait-timeout' =>
 			  (1000 * $PostgreSQL::Test::Utils::timeout_default),
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -637,6 +686,7 @@ my %pgdump_runs = (
 			'--table-and-children' => 'dump_test.measurement',
 			'--lock-wait-timeout' =>
 			  (1000 * $PostgreSQL::Test::Utils::timeout_default),
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -646,6 +696,7 @@ my %pgdump_runs = (
 			'--file' => "$tempdir/role.sql",
 			'--role' => 'regress_dump_test_role',
 			'--schema' => 'dump_test_second_schema',
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -658,11 +709,13 @@ my %pgdump_runs = (
 			'--file' => "$tempdir/role_parallel",
 			'--role' => 'regress_dump_test_role',
 			'--schema' => 'dump_test_second_schema',
+			'--with-statistics',
 			'postgres',
 		],
 		restore_cmd => [
 			'pg_restore',
 			'--file' => "$tempdir/role_parallel.sql",
+			'--with-statistics',
 			"$tempdir/role_parallel",
 		],
 	},
@@ -691,6 +744,7 @@ my %pgdump_runs = (
 			'pg_dump', '--no-sync',
 			'--file' => "$tempdir/section_pre_data.sql",
 			'--section' => 'pre-data',
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -699,6 +753,7 @@ my %pgdump_runs = (
 			'pg_dump', '--no-sync',
 			'--file' => "$tempdir/section_data.sql",
 			'--section' => 'data',
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -707,6 +762,7 @@ my %pgdump_runs = (
 			'pg_dump', '--no-sync',
 			'--file' => "$tempdir/section_post_data.sql",
 			'--section' => 'post-data',
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -717,6 +773,7 @@ my %pgdump_runs = (
 			'--schema' => 'dump_test',
 			'--large-objects',
 			'--no-large-objects',
+			'--with-statistics',
 			'postgres',
 		],
 	},
@@ -732,6 +789,7 @@ my %pgdump_runs = (
 			'pg_dump', '--no-sync',
 			"--file=$tempdir/no_data_no_schema.sql",
 			'--no-data', '--no-schema', 'postgres',
+			'--with-statistics',
 		],
 	},
 	statistics_only => {
@@ -752,6 +810,7 @@ my %pgdump_runs = (
 		dump_cmd => [
 			'pg_dump', '--no-sync',
 			"--file=$tempdir/no_schema.sql", '--no-schema',
+			'--with-statistics',
 			'postgres',
 		],
 	},);
diff --git a/src/bin/pg_upgrade/dump.c b/src/bin/pg_upgrade/dump.c
index 23cb08e8347..183f08ce1e8 100644
--- a/src/bin/pg_upgrade/dump.c
+++ b/src/bin/pg_upgrade/dump.c
@@ -58,7 +58,7 @@ generate_old_dump(void)
                   (user_opts.transfer_mode == TRANSFER_MODE_SWAP) ?
                   "" : "--sequence-data",
                   log_opts.verbose ? "--verbose" : "",
-                  user_opts.do_statistics ? "" : "--no-statistics",
+                  user_opts.do_statistics ? "--with-statistics" : "--no-statistics",
                   log_opts.dumpdir,
                   sql_file_name, escaped_connstr.data);
diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c
index 519f7695d7c..b80c59447fb 100644
--- a/src/pl/plpgsql/src/pl_comp.c
+++ b/src/pl/plpgsql/src/pl_comp.c
@@ -226,8 +226,13 @@ plpgsql_compile_callback(FunctionCallInfo fcinfo,
     /*
      * All the permanent output of compilation (e.g. parse tree) is kept in a
      * per-function memory context, so it can be reclaimed easily.
+     *
+     * While the func_cxt needs to be long-lived, we initially make it a child
+     * of the assumed-short-lived caller's context, and reparent it under
+     * CacheMemoryContext only upon success.  This arrangement avoids memory
+     * leakage during compilation of a faulty function.
      */
-    func_cxt = AllocSetContextCreate(TopMemoryContext,
+    func_cxt = AllocSetContextCreate(CurrentMemoryContext,
                                      "PL/pgSQL function",
                                      ALLOCSET_DEFAULT_SIZES);
     plpgsql_compile_tmp_cxt = MemoryContextSwitchTo(func_cxt);
@@ -704,6 +709,11 @@ plpgsql_compile_callback(FunctionCallInfo fcinfo,
         plpgsql_dumptree(function);
 
     /*
+     * All is well, so make the func_cxt long-lived
+     */
+    MemoryContextSetParent(func_cxt, CacheMemoryContext);
+
+    /*
      * Pop the error context stack
      */
     error_context_stack = plerrcontext.previous;
diff --git a/src/test/regress/expected/horology.out b/src/test/regress/expected/horology.out
index b90bfcd794f..5ae93d8e8a5 100644
--- a/src/test/regress/expected/horology.out
+++ b/src/test/regress/expected/horology.out
@@ -467,6 +467,15 @@ SELECT timestamp with time zone 'Y2001M12D27H04MM05S06.789-08';
 ERROR:  invalid input syntax for type timestamp with time zone: "Y2001M12D27H04MM05S06.789-08"
 LINE 1: SELECT timestamp with time zone 'Y2001M12D27H04MM05S06.789-0...
                                          ^
+-- More examples we used to accept and should not
+SELECT timestamp with time zone 'J2452271 T X03456-08';
+ERROR:  invalid input syntax for type timestamp with time zone: "J2452271 T X03456-08"
+LINE 1: SELECT timestamp with time zone 'J2452271 T X03456-08';
+                                         ^
+SELECT timestamp with time zone 'J2452271 T X03456.001e6-08';
+ERROR:  invalid input syntax for type timestamp with time zone: "J2452271 T X03456.001e6-08"
+LINE 1: SELECT timestamp with time zone 'J2452271 T X03456.001e6-08'...
+                                         ^
 -- conflicting fields should throw errors
 SELECT date '1995-08-06 epoch';
 ERROR:  invalid input syntax for type date: "1995-08-06 epoch"
diff --git a/src/test/regress/expected/strings.out b/src/test/regress/expected/strings.out
index 174f0a68331..788844abd20 100644
--- a/src/test/regress/expected/strings.out
+++ b/src/test/regress/expected/strings.out
@@ -614,6 +614,73 @@ SELECT 'abcdefg' SIMILAR TO '_bcd%' ESCAPE NULL AS null;
 SELECT 'abcdefg' SIMILAR TO '_bcd#%' ESCAPE '##' AS error;
 ERROR:  invalid escape string
 HINT:  Escape string must be empty or one character.
+-- Characters that should be left alone in character classes when a
+-- SIMILAR TO regexp pattern is converted to POSIX style.
+-- Underscore "_"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '_[_[:alpha:]_]_';
+                   QUERY PLAN
+------------------------------------------------
+ Seq Scan on text_tbl
+   Filter: (f1 ~ '^(?:.[_[:alpha:]_].)$'::text)
+(2 rows)
+
+-- Percentage "%"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '%[%[:alnum:]%]%';
+                    QUERY PLAN
+--------------------------------------------------
+ Seq Scan on text_tbl
+   Filter: (f1 ~ '^(?:.*[%[:alnum:]%].*)$'::text)
+(2 rows)
+
+-- Dot "."
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '.[.[:alnum:].].';
+                    QUERY PLAN
+--------------------------------------------------
+ Seq Scan on text_tbl
+   Filter: (f1 ~ '^(?:\.[.[:alnum:].]\.)$'::text)
+(2 rows)
+
+-- Dollar "$"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '$[$[:alnum:]$]$';
+                    QUERY PLAN
+--------------------------------------------------
+ Seq Scan on text_tbl
+   Filter: (f1 ~ '^(?:\$[$[:alnum:]$]\$)$'::text)
+(2 rows)
+
+-- Opening parenthesis "("
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '()[([:alnum:](]()';
+                      QUERY PLAN
+------------------------------------------------------
+ Seq Scan on text_tbl
+   Filter: (f1 ~ '^(?:(?:)[([:alnum:](](?:))$'::text)
+(2 rows)
+
+-- Caret "^"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '^[^[:alnum:]^[^^][[^^]][\^][[\^]]\^]^';
+                               QUERY PLAN
+------------------------------------------------------------------------
+ Seq Scan on text_tbl
+   Filter: (f1 ~ '^(?:\^[^[:alnum:]^[^^][[^^]][\^][[\^]]\^]\^)$'::text)
+(2 rows)
+
+-- Closing square bracket "]" at the beginning of character class
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '[]%][^]%][^%]%';
+                   QUERY PLAN
+------------------------------------------------
+ Seq Scan on text_tbl
+   Filter: (f1 ~ '^(?:[]%][^]%][^%].*)$'::text)
+(2 rows)
+
+-- Closing square bracket effective after two carets at the beginning
+-- of character class.
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '[^^]^';
+              QUERY PLAN
+---------------------------------------
+ Seq Scan on text_tbl
+   Filter: (f1 ~ '^(?:[^^]\^)$'::text)
+(2 rows)
+
 -- Test backslash escapes in regexp_replace's replacement string
 SELECT regexp_replace('1112223333', E'(\\d{3})(\\d{3})(\\d{4})', E'(\\1) \\2-\\3');
  regexp_replace
diff --git a/src/test/regress/sql/horology.sql b/src/test/regress/sql/horology.sql
index 1310b432773..8978249a5dc 100644
--- a/src/test/regress/sql/horology.sql
+++ b/src/test/regress/sql/horology.sql
@@ -102,6 +102,10 @@ SELECT date 'J J 1520447';
 SELECT timestamp with time zone 'Y2001M12D27H04M05S06.789+08';
 SELECT timestamp with time zone 'Y2001M12D27H04MM05S06.789-08';
 
+-- More examples we used to accept and should not
+SELECT timestamp with time zone 'J2452271 T X03456-08';
+SELECT timestamp with time zone 'J2452271 T X03456.001e6-08';
+
 -- conflicting fields should throw errors
 SELECT date '1995-08-06 epoch';
 SELECT date '1995-08-06 infinity';
diff --git a/src/test/regress/sql/strings.sql b/src/test/regress/sql/strings.sql
index f7b325baadf..2577a42987d 100644
--- a/src/test/regress/sql/strings.sql
+++ b/src/test/regress/sql/strings.sql
@@ -197,6 +197,26 @@ SELECT 'abcd\efg' SIMILAR TO '_bcd\%' ESCAPE '' AS true;
 SELECT 'abcdefg' SIMILAR TO '_bcd%' ESCAPE NULL AS null;
 SELECT 'abcdefg' SIMILAR TO '_bcd#%' ESCAPE '##' AS error;
 
+-- Characters that should be left alone in character classes when a
+-- SIMILAR TO regexp pattern is converted to POSIX style.
+-- Underscore "_"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '_[_[:alpha:]_]_';
+-- Percentage "%"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '%[%[:alnum:]%]%';
+-- Dot "."
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '.[.[:alnum:].].';
+-- Dollar "$"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '$[$[:alnum:]$]$';
+-- Opening parenthesis "("
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '()[([:alnum:](]()';
+-- Caret "^"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '^[^[:alnum:]^[^^][[^^]][\^][[\^]]\^]^';
+-- Closing square bracket "]" at the beginning of character class
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '[]%][^]%][^%]%';
+-- Closing square bracket effective after two carets at the beginning
+-- of character class.
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '[^^]^';
+
 -- Test backslash escapes in regexp_replace's replacement string
 SELECT regexp_replace('1112223333', E'(\\d{3})(\\d{3})(\\d{4})', E'(\\1) \\2-\\3');
 SELECT regexp_replace('foobarrbazz', E'(.)\\1', E'X\\&Y', 'g');
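Beyond the EXPLAIN-based translation checks above, the bracket rules can also be seen directly in match results (a sketch; the expected outcomes follow from the rules implemented in similar_escape_internal, not from the test files):

    SELECT ']' SIMILAR TO '[]]'  AS t;   -- leading ] is literal, so this matches
    SELECT 'x' SIMILAR TO '[^]]' AS t;   -- ] right after the first ^ is literal too
    SELECT '[' SIMILAR TO '[[]'  AS t;   -- [[] is a class containing [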