Diffstat (limited to 'src')
-rw-r--r--  src/backend/access/rmgrdesc/xactdesc.c          |   2
-rw-r--r--  src/backend/parser/analyze.c                     |  17
-rw-r--r--  src/backend/replication/logical/logical.c        |  23
-rw-r--r--  src/backend/storage/aio/aio.c                    | 113
-rw-r--r--  src/backend/storage/aio/aio_io.c                 |   4
-rw-r--r--  src/backend/utils/adt/ruleutils.c                |  10
-rw-r--r--  src/bin/pg_dump/pg_dump.c                        |  21
-rw-r--r--  src/bin/pg_dump/t/002_pg_dump.pl                 |   6
-rw-r--r--  src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm   |   1
-rw-r--r--  src/test/regress/expected/limit.out              |  20
-rw-r--r--  src/test/regress/sql/limit.sql                   |   5
-rw-r--r--  src/tools/RELEASE_CHANGES                        |   3
12 files changed, 181 insertions(+), 44 deletions(-)
diff --git a/src/backend/access/rmgrdesc/xactdesc.c b/src/backend/access/rmgrdesc/xactdesc.c
index 715cc1f7bad..305598e2865 100644
--- a/src/backend/access/rmgrdesc/xactdesc.c
+++ b/src/backend/access/rmgrdesc/xactdesc.c
@@ -252,6 +252,8 @@ ParsePrepareRecord(uint8 info, xl_xact_prepare *xlrec, xl_xact_parsed_prepare *p
parsed->nsubxacts = xlrec->nsubxacts;
parsed->nrels = xlrec->ncommitrels;
parsed->nabortrels = xlrec->nabortrels;
+ parsed->nstats = xlrec->ncommitstats;
+ parsed->nabortstats = xlrec->nabortstats;
parsed->nmsgs = xlrec->ninvalmsgs;
strncpy(parsed->twophase_gid, bufptr, xlrec->gidlen);
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index 1f4d6adda52..a16fdd65601 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -253,20 +253,14 @@ parse_sub_analyze(Node *parseTree, ParseState *parentParseState,
* statements. However, we have the statement's location plus the length
* (p_stmt_len) and location (p_stmt_location) of the top level RawStmt,
* stored in pstate. Thus, the statement's length is the RawStmt's length
- * minus how much we've advanced in the RawStmt's string.
+ * minus how much we've advanced in the RawStmt's string. If p_stmt_len
+ * is 0, the SQL string is used up to its end.
*/
static void
setQueryLocationAndLength(ParseState *pstate, Query *qry, Node *parseTree)
{
ParseLoc stmt_len = 0;
- /*
- * If there is no information about the top RawStmt's length, leave it at
- * 0 to use the whole string.
- */
- if (pstate->p_stmt_len == 0)
- return;
-
switch (nodeTag(parseTree))
{
case T_InsertStmt:
@@ -308,11 +302,12 @@ setQueryLocationAndLength(ParseState *pstate, Query *qry, Node *parseTree)
/* Statement's length is known, use it */
qry->stmt_len = stmt_len;
}
- else
+ else if (pstate->p_stmt_len > 0)
{
/*
- * Compute the statement's length from the statement's location and
- * the RawStmt's length and location.
+ * The top RawStmt's length is known, so calculate the statement's
+ * length from the statement's location and the RawStmt's length and
+ * location.
*/
qry->stmt_len = pstate->p_stmt_len - (qry->stmt_location - pstate->p_stmt_location);
}
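A worked example of the computation above (the offsets are hypothetical): if the top-level RawStmt starts at p_stmt_location = 0 with p_stmt_len = 35, and the statement being transformed begins at stmt_location = 20, the fallback branch yields stmt_len = 35 - (20 - 0) = 15, i.e. the remainder of the RawStmt's text. When p_stmt_len is 0 and the parse tree itself does not provide a length, stmt_len stays 0, which readers of Query.stmt_len treat as "up to the end of the SQL string".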
diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c
index a8d2e024d34..1d56d0c4ef3 100644
--- a/src/backend/replication/logical/logical.c
+++ b/src/backend/replication/logical/logical.c
@@ -1828,7 +1828,19 @@ LogicalConfirmReceivedLocation(XLogRecPtr lsn)
SpinLockAcquire(&MyReplicationSlot->mutex);
- MyReplicationSlot->data.confirmed_flush = lsn;
+ /*
+ * Prevent moving the confirmed_flush backwards, as this could lead to
+ * data duplication issues caused by replicating already replicated
+ * changes.
+ *
+ * This can happen when a client acknowledges an LSN it doesn't have
+ * to do anything for, and thus didn't store persistently. After a
+ * restart, the client can send the prior LSN that it stored
+ * persistently as an acknowledgement, but we need to ignore such an
+ * LSN. See similar case handling in CreateDecodingContext.
+ */
+ if (lsn > MyReplicationSlot->data.confirmed_flush)
+ MyReplicationSlot->data.confirmed_flush = lsn;
/* if we're past the location required for bumping xmin, do so */
if (MyReplicationSlot->candidate_xmin_lsn != InvalidXLogRecPtr &&
@@ -1893,7 +1905,14 @@ LogicalConfirmReceivedLocation(XLogRecPtr lsn)
else
{
SpinLockAcquire(&MyReplicationSlot->mutex);
- MyReplicationSlot->data.confirmed_flush = lsn;
+
+ /*
+ * Prevent moving the confirmed_flush backwards. See comments above
+ * for the details.
+ */
+ if (lsn > MyReplicationSlot->data.confirmed_flush)
+ MyReplicationSlot->data.confirmed_flush = lsn;
+
SpinLockRelease(&MyReplicationSlot->mutex);
}
}
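The guarded field is visible from SQL as pg_replication_slots.confirmed_flush_lsn, so the behaviour can be observed from a client session; a minimal sketch (the slot name is hypothetical, assuming a logical slot already exists):

	-- confirmed_flush_lsn should only ever advance, even if a client
	-- re-sends an older LSN as its acknowledgement after a restart.
	SELECT slot_name, confirmed_flush_lsn
	FROM pg_catalog.pg_replication_slots
	WHERE slot_name = 'my_logical_slot';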
diff --git a/src/backend/storage/aio/aio.c b/src/backend/storage/aio/aio.c
index ebb5a771bfd..c64d815ebd1 100644
--- a/src/backend/storage/aio/aio.c
+++ b/src/backend/storage/aio/aio.c
@@ -184,6 +184,8 @@ pgaio_io_acquire(struct ResourceOwnerData *resowner, PgAioReturn *ret)
PgAioHandle *
pgaio_io_acquire_nb(struct ResourceOwnerData *resowner, PgAioReturn *ret)
{
+ PgAioHandle *ioh = NULL;
+
if (pgaio_my_backend->num_staged_ios >= PGAIO_SUBMIT_BATCH_SIZE)
{
Assert(pgaio_my_backend->num_staged_ios == PGAIO_SUBMIT_BATCH_SIZE);
@@ -193,10 +195,17 @@ pgaio_io_acquire_nb(struct ResourceOwnerData *resowner, PgAioReturn *ret)
if (pgaio_my_backend->handed_out_io)
elog(ERROR, "API violation: Only one IO can be handed out");
+ /*
+ * Probably not needed today, as interrupts should not process this IO,
+ * but...
+ */
+ HOLD_INTERRUPTS();
+
if (!dclist_is_empty(&pgaio_my_backend->idle_ios))
{
dlist_node *ion = dclist_pop_head_node(&pgaio_my_backend->idle_ios);
- PgAioHandle *ioh = dclist_container(PgAioHandle, node, ion);
+
+ ioh = dclist_container(PgAioHandle, node, ion);
Assert(ioh->state == PGAIO_HS_IDLE);
Assert(ioh->owner_procno == MyProcNumber);
@@ -212,11 +221,11 @@ pgaio_io_acquire_nb(struct ResourceOwnerData *resowner, PgAioReturn *ret)
ioh->report_return = ret;
ret->result.status = PGAIO_RS_UNKNOWN;
}
-
- return ioh;
}
- return NULL;
+ RESUME_INTERRUPTS();
+
+ return ioh;
}
/*
@@ -233,6 +242,12 @@ pgaio_io_release(PgAioHandle *ioh)
Assert(ioh->resowner);
pgaio_my_backend->handed_out_io = NULL;
+
+ /*
+ * Note that no interrupts are processed between the handed_out_io
+ * check and the call to reclaim - that's important as otherwise an
+ * interrupt could have already reclaimed the handle.
+ */
pgaio_io_reclaim(ioh);
}
else
@@ -251,6 +266,12 @@ pgaio_io_release_resowner(dlist_node *ioh_node, bool on_error)
Assert(ioh->resowner);
+ /*
+ * Otherwise an interrupt, in the middle of releasing the IO, could end up
+ * trying to wait for the IO, leading to state confusion.
+ */
+ HOLD_INTERRUPTS();
+
ResourceOwnerForgetAioHandle(ioh->resowner, &ioh->resowner_node);
ioh->resowner = NULL;
@@ -291,6 +312,8 @@ pgaio_io_release_resowner(dlist_node *ioh_node, bool on_error)
*/
if (ioh->report_return)
ioh->report_return = NULL;
+
+ RESUME_INTERRUPTS();
}
/*
@@ -359,6 +382,13 @@ pgaio_io_get_wref(PgAioHandle *ioh, PgAioWaitRef *iow)
static inline void
pgaio_io_update_state(PgAioHandle *ioh, PgAioHandleState new_state)
{
+ /*
+ * All callers need to have held interrupts in some form, otherwise
+ * interrupt processing could wait for the IO to complete, while in an
+ * intermediary state.
+ */
+ Assert(!INTERRUPTS_CAN_BE_PROCESSED());
+
pgaio_debug_io(DEBUG5, ioh,
"updating state to %s",
pgaio_io_state_get_name(new_state));
@@ -396,6 +426,13 @@ pgaio_io_stage(PgAioHandle *ioh, PgAioOp op)
Assert(pgaio_my_backend->handed_out_io == ioh);
Assert(pgaio_io_has_target(ioh));
+ /*
+ * Otherwise an interrupt, in the middle of staging and possibly executing
+ * the IO, could end up trying to wait for the IO, leading to state
+ * confusion.
+ */
+ HOLD_INTERRUPTS();
+
ioh->op = op;
ioh->result = 0;
@@ -435,6 +472,8 @@ pgaio_io_stage(PgAioHandle *ioh, PgAioOp op)
pgaio_io_prepare_submit(ioh);
pgaio_io_perform_synchronously(ioh);
}
+
+ RESUME_INTERRUPTS();
}
bool
@@ -544,8 +583,8 @@ pgaio_io_wait(PgAioHandle *ioh, uint64 ref_generation)
&& state != PGAIO_HS_COMPLETED_SHARED
&& state != PGAIO_HS_COMPLETED_LOCAL)
{
- elog(PANIC, "waiting for own IO in wrong state: %d",
- state);
+ elog(PANIC, "waiting for own IO %d in wrong state: %s",
+ pgaio_io_get_id(ioh), pgaio_io_get_state_name(ioh));
}
}
@@ -599,7 +638,13 @@ pgaio_io_wait(PgAioHandle *ioh, uint64 ref_generation)
case PGAIO_HS_COMPLETED_SHARED:
case PGAIO_HS_COMPLETED_LOCAL:
- /* see above */
+
+ /*
+ * Note that no interrupts are processed between
+ * pgaio_io_was_recycled() and this check - that's important
+ * as otherwise an interrupt could have already reclaimed the
+ * handle.
+ */
if (am_owner)
pgaio_io_reclaim(ioh);
return;
@@ -610,6 +655,11 @@ pgaio_io_wait(PgAioHandle *ioh, uint64 ref_generation)
/*
* Make IO handle ready to be reused after IO has completed or after the
* handle has been released without being used.
+ *
+ * Note that callers need to be careful about only calling this in the right
+ * state and that no interrupts can be processed between the state check and
+ * the call to pgaio_io_reclaim(). Otherwise interrupt processing could
+ * already have reclaimed the handle.
*/
static void
pgaio_io_reclaim(PgAioHandle *ioh)
@@ -618,6 +668,9 @@ pgaio_io_reclaim(PgAioHandle *ioh)
Assert(ioh->owner_procno == MyProcNumber);
Assert(ioh->state != PGAIO_HS_IDLE);
+ /* see comment in function header */
+ HOLD_INTERRUPTS();
+
/*
* It's a bit ugly, but right now the easiest place to put the execution
* of local completion callbacks is this function, as we need to execute
@@ -685,6 +738,8 @@ pgaio_io_reclaim(PgAioHandle *ioh)
* efficient in cases where only a few IOs are used.
*/
dclist_push_head(&pgaio_my_backend->idle_ios, &ioh->node);
+
+ RESUME_INTERRUPTS();
}
/*
@@ -700,7 +755,7 @@ pgaio_io_wait_for_free(void)
pgaio_debug(DEBUG2, "waiting for free IO with %d pending, %d in-flight, %d idle IOs",
pgaio_my_backend->num_staged_ios,
dclist_count(&pgaio_my_backend->in_flight_ios),
- dclist_is_empty(&pgaio_my_backend->idle_ios));
+ dclist_count(&pgaio_my_backend->idle_ios));
/*
* First check if any of our IOs actually have completed - when using
@@ -714,6 +769,11 @@ pgaio_io_wait_for_free(void)
if (ioh->state == PGAIO_HS_COMPLETED_SHARED)
{
+ /*
+ * Note that no interrupts are processed between the state check
+ * and the call to reclaim - that's important as otherwise an
+ * interrupt could have already reclaimed the handle.
+ */
pgaio_io_reclaim(ioh);
reclaimed++;
}
@@ -730,13 +790,17 @@ pgaio_io_wait_for_free(void)
if (pgaio_my_backend->num_staged_ios > 0)
pgaio_submit_staged();
+ /* possibly some IOs finished during submission */
+ if (!dclist_is_empty(&pgaio_my_backend->idle_ios))
+ return;
+
if (dclist_count(&pgaio_my_backend->in_flight_ios) == 0)
ereport(ERROR,
errmsg_internal("no free IOs despite no in-flight IOs"),
errdetail_internal("%d pending, %d in-flight, %d idle IOs",
pgaio_my_backend->num_staged_ios,
dclist_count(&pgaio_my_backend->in_flight_ios),
- dclist_is_empty(&pgaio_my_backend->idle_ios)));
+ dclist_count(&pgaio_my_backend->idle_ios)));
/*
* Wait for the oldest in-flight IO to complete.
@@ -747,6 +811,7 @@ pgaio_io_wait_for_free(void)
{
PgAioHandle *ioh = dclist_head_element(PgAioHandle, node,
&pgaio_my_backend->in_flight_ios);
+ uint64 generation = ioh->generation;
switch (ioh->state)
{
@@ -770,13 +835,24 @@ pgaio_io_wait_for_free(void)
* In a more general case this would be racy, because the
* generation could increase after we read ioh->state above.
* But we are only looking at IOs by the current backend and
- * the IO can only be recycled by this backend.
+ * the IO can only be recycled by this backend. Even this is
+ * only OK because we get the handle's generation before
+ * potentially processing interrupts, e.g. as part of
+ * pgaio_debug_io().
*/
- pgaio_io_wait(ioh, ioh->generation);
+ pgaio_io_wait(ioh, generation);
break;
case PGAIO_HS_COMPLETED_SHARED:
- /* it's possible that another backend just finished this IO */
+
+ /*
+ * It's possible that another backend just finished this IO.
+ *
+ * Note that no interrupts are processed between the state
+ * check and the call to reclaim - that's important as
+ * otherwise an interrupt could have already reclaimed the
+ * handle.
+ */
pgaio_io_reclaim(ioh);
break;
}
@@ -926,6 +1002,11 @@ pgaio_wref_check_done(PgAioWaitRef *iow)
if (state == PGAIO_HS_COMPLETED_SHARED ||
state == PGAIO_HS_COMPLETED_LOCAL)
{
+ /*
+ * Note that no interrupts are processed between
+ * pgaio_io_was_recycled() and this check - that's important as
+ * otherwise an interrupt could have already reclaimed the handle.
+ */
if (am_owner)
pgaio_io_reclaim(ioh);
return true;
@@ -1153,11 +1234,14 @@ pgaio_closing_fd(int fd)
{
dlist_iter iter;
PgAioHandle *ioh = NULL;
+ uint64 generation;
dclist_foreach(iter, &pgaio_my_backend->in_flight_ios)
{
ioh = dclist_container(PgAioHandle, node, iter.cur);
+ generation = ioh->generation;
+
if (pgaio_io_uses_fd(ioh, fd))
break;
else
@@ -1172,7 +1256,7 @@ pgaio_closing_fd(int fd)
fd, dclist_count(&pgaio_my_backend->in_flight_ios));
/* see comment in pgaio_io_wait_for_free() about raciness */
- pgaio_io_wait(ioh, ioh->generation);
+ pgaio_io_wait(ioh, generation);
}
}
}
@@ -1201,13 +1285,14 @@ pgaio_shutdown(int code, Datum arg)
while (!dclist_is_empty(&pgaio_my_backend->in_flight_ios))
{
PgAioHandle *ioh = dclist_head_element(PgAioHandle, node, &pgaio_my_backend->in_flight_ios);
+ uint64 generation = ioh->generation;
pgaio_debug_io(DEBUG2, ioh,
"waiting for IO to complete during shutdown, %d in-flight IOs",
dclist_count(&pgaio_my_backend->in_flight_ios));
/* see comment in pgaio_io_wait_for_free() about raciness */
- pgaio_io_wait(ioh, ioh->generation);
+ pgaio_io_wait(ioh, generation);
}
pgaio_my_backend = NULL;
diff --git a/src/backend/storage/aio/aio_io.c b/src/backend/storage/aio/aio_io.c
index 00e176135a6..520b5077df2 100644
--- a/src/backend/storage/aio/aio_io.c
+++ b/src/backend/storage/aio/aio_io.c
@@ -181,9 +181,9 @@ pgaio_io_get_op_name(PgAioHandle *ioh)
case PGAIO_OP_INVALID:
return "invalid";
case PGAIO_OP_READV:
- return "read";
+ return "readv";
case PGAIO_OP_WRITEV:
- return "write";
+ return "writev";
}
return NULL; /* silence compiler */
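The corrected operation names ("readv"/"writev"), like the handle state names now printed by the PANIC message in pgaio_io_wait(), are the strings exposed for debugging; assuming the pg_aios system view from this development cycle is present, a quick way to look at them while IOs are in flight:

	-- List asynchronous I/O handles currently in use by any backend;
	-- the operation column should report readv/writev, not read/write.
	SELECT * FROM pg_catalog.pg_aios;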
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 467b08198b8..3d6e6bdbfd2 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -5956,9 +5956,19 @@ get_select_query_def(Query *query, deparse_context *context)
{
if (query->limitOption == LIMIT_OPTION_WITH_TIES)
{
+ /*
+ * The limitCount arg is a c_expr, so it needs parens. Simple
+ * literals and function expressions would not need parens, but
+ * unfortunately it's hard to tell if the expression will be
+ * printed as a simple literal like 123 or as a typecast
+ * expression, like '-123'::int4. The grammar accepts the former
+ * without quoting, but not the latter.
+ */
appendContextKeyword(context, " FETCH FIRST ",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+ appendStringInfoChar(buf, '(');
get_rule_expr(query->limitCount, context, false);
+ appendStringInfoChar(buf, ')');
appendStringInfoString(buf, " ROWS WITH TIES");
}
else
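A hedged SQL sketch of the case the new comment describes (the view name is made up): the limit expression now deparses with parentheses, so forms such as 5::bigint or '-123'::int4, which are not valid c_exprs on their own, reload cleanly when the stored view definition is re-parsed.

	-- Deparses as FETCH FIRST (5::bigint) ROWS WITH TIES; without the
	-- parentheses the typecast would be rejected after FETCH FIRST.
	CREATE VIEW fetch_ties_demo AS
	  SELECT g
	  FROM generate_series(1, 10) AS g
	  ORDER BY g
	  FETCH FIRST (5::bigint) ROWS WITH TIES;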
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index e2e7975b34e..c73e73a87d1 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -10765,6 +10765,9 @@ fetchAttributeStats(Archive *fout)
restarted = true;
}
+ appendPQExpBufferChar(nspnames, '{');
+ appendPQExpBufferChar(relnames, '{');
+
/*
* Scan the TOC for the next set of relevant stats entries. We assume
* that statistics are dumped in the order they are listed in the TOC.
@@ -10776,23 +10779,25 @@ fetchAttributeStats(Archive *fout)
if ((te->reqs & REQ_STATS) != 0 &&
strcmp(te->desc, "STATISTICS DATA") == 0)
{
- appendPQExpBuffer(nspnames, "%s%s", count ? "," : "",
- fmtId(te->namespace));
- appendPQExpBuffer(relnames, "%s%s", count ? "," : "",
- fmtId(te->tag));
+ appendPGArray(nspnames, te->namespace);
+ appendPGArray(relnames, te->tag);
count++;
}
}
+ appendPQExpBufferChar(nspnames, '}');
+ appendPQExpBufferChar(relnames, '}');
+
/* Execute the query for the next batch of relations. */
if (count > 0)
{
PQExpBuffer query = createPQExpBuffer();
- appendPQExpBuffer(query, "EXECUTE getAttributeStats("
- "'{%s}'::pg_catalog.name[],"
- "'{%s}'::pg_catalog.name[])",
- nspnames->data, relnames->data);
+ appendPQExpBufferStr(query, "EXECUTE getAttributeStats(");
+ appendStringLiteralAH(query, nspnames->data, fout);
+ appendPQExpBufferStr(query, "::pg_catalog.name[],");
+ appendStringLiteralAH(query, relnames->data, fout);
+ appendPQExpBufferStr(query, "::pg_catalog.name[])");
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
destroyPQExpBuffer(query);
}
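The rewritten query builds the namespace and relation lists as array literals wrapped in ordinary string literals, so names containing quotes, commas, or spaces survive quoting. A hedged, self-contained illustration of the resulting literal form, using the relation name from the test change below:

	-- Inside a name[] literal, embedded double quotes are backslash-escaped
	-- by the array syntax and single quotes are doubled by the SQL literal.
	SELECT ('{"\"dump_test\"''s post-data index"}'::pg_catalog.name[])[1];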
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index 55d892d9c16..cf34f71ea11 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -4834,13 +4834,13 @@ my %tests = (
CREATE TABLE dump_test.has_stats
AS SELECT g.g AS x, g.g / 2 AS y FROM generate_series(1,100) AS g(g);
CREATE MATERIALIZED VIEW dump_test.has_stats_mv AS SELECT * FROM dump_test.has_stats;
- CREATE INDEX dup_test_post_data_ix ON dump_test.has_stats(x, (x - 1));
+ CREATE INDEX """dump_test""\'s post-data index" ON dump_test.has_stats(x, (x - 1));
ANALYZE dump_test.has_stats, dump_test.has_stats_mv;',
regexp => qr/^
\QSELECT * FROM pg_catalog.pg_restore_relation_stats(\E\s+
'version',\s'\d+'::integer,\s+
'schemaname',\s'dump_test',\s+
- 'relname',\s'dup_test_post_data_ix',\s+
+ 'relname',\s'"dump_test"''s\ post-data\ index',\s+
'relpages',\s'\d+'::integer,\s+
'reltuples',\s'\d+'::real,\s+
'relallvisible',\s'\d+'::integer,\s+
@@ -4849,7 +4849,7 @@ my %tests = (
\QSELECT * FROM pg_catalog.pg_restore_attribute_stats(\E\s+
'version',\s'\d+'::integer,\s+
'schemaname',\s'dump_test',\s+
- 'relname',\s'dup_test_post_data_ix',\s+
+ 'relname',\s'"dump_test"''s\ post-data\ index',\s+
'attnum',\s'2'::smallint,\s+
'inherited',\s'f'::boolean,\s+
'null_frac',\s'0'::real,\s+
diff --git a/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm b/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm
index 609275e2c26..1725fe2f948 100644
--- a/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm
+++ b/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm
@@ -538,6 +538,7 @@ my @_unused_view_qualifiers = (
{ obj => 'VIEW public.limit_thousand_v_2', qual => 'onek' },
{ obj => 'VIEW public.limit_thousand_v_3', qual => 'onek' },
{ obj => 'VIEW public.limit_thousand_v_4', qual => 'onek' },
+ { obj => 'VIEW public.limit_thousand_v_5', qual => 'onek' },
# Since 14
{ obj => 'MATERIALIZED VIEW public.compressmv', qual => 'cmdata1' });
diff --git a/src/test/regress/expected/limit.out b/src/test/regress/expected/limit.out
index f4267c002d7..e3bcc680653 100644
--- a/src/test/regress/expected/limit.out
+++ b/src/test/regress/expected/limit.out
@@ -647,7 +647,7 @@ View definition:
WHERE thousand < 995
ORDER BY thousand
OFFSET 10
- FETCH FIRST 5 ROWS WITH TIES;
+ FETCH FIRST (5) ROWS WITH TIES;
CREATE VIEW limit_thousand_v_2 AS SELECT thousand FROM onek WHERE thousand < 995
ORDER BY thousand OFFSET 10 FETCH FIRST 5 ROWS ONLY;
@@ -679,10 +679,10 @@ View definition:
FROM onek
WHERE thousand < 995
ORDER BY thousand
- FETCH FIRST (NULL::integer + 1) ROWS WITH TIES;
+ FETCH FIRST ((NULL::integer + 1)) ROWS WITH TIES;
CREATE VIEW limit_thousand_v_4 AS SELECT thousand FROM onek WHERE thousand < 995
- ORDER BY thousand FETCH FIRST NULL ROWS ONLY;
+ ORDER BY thousand FETCH FIRST (5::bigint) ROWS WITH TIES;
\d+ limit_thousand_v_4
View "public.limit_thousand_v_4"
Column | Type | Collation | Nullable | Default | Storage | Description
@@ -693,6 +693,20 @@ View definition:
FROM onek
WHERE thousand < 995
ORDER BY thousand
+ FETCH FIRST (5::bigint) ROWS WITH TIES;
+
+CREATE VIEW limit_thousand_v_5 AS SELECT thousand FROM onek WHERE thousand < 995
+ ORDER BY thousand FETCH FIRST NULL ROWS ONLY;
+\d+ limit_thousand_v_5
+ View "public.limit_thousand_v_5"
+ Column | Type | Collation | Nullable | Default | Storage | Description
+----------+---------+-----------+----------+---------+---------+-------------
+ thousand | integer | | | | plain |
+View definition:
+ SELECT thousand
+ FROM onek
+ WHERE thousand < 995
+ ORDER BY thousand
LIMIT ALL;
-- leave these views
diff --git a/src/test/regress/sql/limit.sql b/src/test/regress/sql/limit.sql
index 6f0cda98701..603910fe6d1 100644
--- a/src/test/regress/sql/limit.sql
+++ b/src/test/regress/sql/limit.sql
@@ -196,6 +196,9 @@ CREATE VIEW limit_thousand_v_3 AS SELECT thousand FROM onek WHERE thousand < 995
ORDER BY thousand FETCH FIRST (NULL+1) ROWS WITH TIES;
\d+ limit_thousand_v_3
CREATE VIEW limit_thousand_v_4 AS SELECT thousand FROM onek WHERE thousand < 995
- ORDER BY thousand FETCH FIRST NULL ROWS ONLY;
+ ORDER BY thousand FETCH FIRST (5::bigint) ROWS WITH TIES;
\d+ limit_thousand_v_4
+CREATE VIEW limit_thousand_v_5 AS SELECT thousand FROM onek WHERE thousand < 995
+ ORDER BY thousand FETCH FIRST NULL ROWS ONLY;
+\d+ limit_thousand_v_5
-- leave these views
diff --git a/src/tools/RELEASE_CHANGES b/src/tools/RELEASE_CHANGES
index 94c5a0f3bfb..c0d75c213be 100644
--- a/src/tools/RELEASE_CHANGES
+++ b/src/tools/RELEASE_CHANGES
@@ -89,6 +89,9 @@ Starting a New Development Cycle
* Typically, we do pgindent and perltidy runs just before branching,
as well as before beta (complete steps from src/tools/pgindent/README)
+* It's also advisable to check that copyright years are up-to-date
+ (run src/tools/copyright.pl, commit any changes it finds)
+
* Create a branch in git for maintenance of the previous release
o on master branch, do:
git pull # be sure you have the latest "master"