 contrib/pg_prewarm/autoprewarm.c                       | 11
 doc/src/sgml/catalogs.sgml                             |  8
 doc/src/sgml/ref/security_label.sgml                   |  4
 doc/src/sgml/release-18.sgml                           | 21
 doc/src/sgml/system-views.sgml                         |  2
 doc/src/sgml/trigger.sgml                              |  7
 src/backend/access/nbtree/nbtree.c                     | 30
 src/backend/access/nbtree/nbtsearch.c                  | 70
 src/backend/access/nbtree/nbtutils.c                   | 93
 src/backend/commands/dbcommands.c                      | 41
 src/backend/commands/tablecmds.c                       | 58
 src/backend/replication/logical/launcher.c             |  2
 src/backend/storage/aio/method_io_uring.c              |  2
 src/backend/utils/activity/pgstat_shmem.c              |  5
 src/backend/utils/fmgr/dfmgr.c                         | 16
 src/bin/psql/common.c                                  | 15
 src/bin/psql/describe.c                                |  4
 src/bin/psql/t/001_basic.pl                            | 22
 src/include/access/nbtree.h                            |  5
 src/test/modules/test_dsm_registry/test_dsm_registry.c |  4
 src/test/regress/expected/foreign_key.out              | 79
 src/test/regress/expected/triggers.out                 | 26
 src/test/regress/sql/foreign_key.sql                   | 36
 src/test/regress/sql/triggers.sql                      | 18
 24 files changed, 391 insertions(+), 188 deletions(-)
diff --git a/contrib/pg_prewarm/autoprewarm.c b/contrib/pg_prewarm/autoprewarm.c
index c52f4d4dc9e..c01b9c7e6a4 100644
--- a/contrib/pg_prewarm/autoprewarm.c
+++ b/contrib/pg_prewarm/autoprewarm.c
@@ -693,8 +693,15 @@ apw_dump_now(bool is_bgworker, bool dump_unlogged)
return 0;
}
- block_info_array =
- (BlockInfoRecord *) palloc(sizeof(BlockInfoRecord) * NBuffers);
+ /*
+ * With sufficiently large shared_buffers, allocation will exceed 1GB, so
+ * allow for a huge allocation to prevent outright failure.
+ *
+ * (In the future, it might be a good idea to redesign this to use a more
+ * memory-efficient data structure.)
+ */
+ block_info_array = (BlockInfoRecord *)
+ palloc_extended((sizeof(BlockInfoRecord) * NBuffers), MCXT_ALLOC_HUGE);
for (num_blocks = 0, i = 0; i < NBuffers; i++)
{
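
The new comment motivates the switch to palloc_extended(..., MCXT_ALLOC_HUGE): plain palloc() caps requests at MaxAllocSize (just under 1 GB), which a large enough shared_buffers can exceed. A rough sizing sketch, assuming a BlockInfoRecord of about 20 bytes (an assumption for illustration, not a figure stated in the patch):

    -- with shared_buffers = 512GB and the default 8kB block size, NBuffers is
    -- 67,108,864, so the dump array needs about 1.25 GiB, beyond what palloc allows
    SELECT pg_size_pretty(20::bigint * (512::bigint * 1024 * 1024 * 1024 / 8192))
        AS approx_block_info_array_size;   -- 1280 MB
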
diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml
index cbd4e40a320..fa86c569dc4 100644
--- a/doc/src/sgml/catalogs.sgml
+++ b/doc/src/sgml/catalogs.sgml
@@ -1582,7 +1582,7 @@
<structfield>rolpassword</structfield> <type>text</type>
</para>
<para>
- Password (possibly encrypted); null if none. The format depends
+ Encrypted password; null if none. The format depends
on the form of encryption used.
</para></entry>
</row>
@@ -1627,11 +1627,6 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<replaceable>ServerKey</replaceable> are in Base64 encoded format. This format is
the same as that specified by <ulink url="https://datatracker.ietf.org/doc/html/rfc5803">RFC 5803</ulink>.
</para>
-
- <para>
- A password that does not follow either of those formats is assumed to be
- unencrypted.
- </para>
</sect1>
@@ -2629,7 +2624,6 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
</para>
<para>
Has the constraint been validated?
- Currently, can be false only for foreign keys and CHECK constraints
</para></entry>
</row>
diff --git a/doc/src/sgml/ref/security_label.sgml b/doc/src/sgml/ref/security_label.sgml
index e5e5fb483e9..aa45c0af248 100644
--- a/doc/src/sgml/ref/security_label.sgml
+++ b/doc/src/sgml/ref/security_label.sgml
@@ -84,6 +84,10 @@ SECURITY LABEL [ FOR <replaceable class="parameter">provider</replaceable> ] ON
based on object labels, rather than traditional discretionary access control
(DAC) concepts such as users and groups.
</para>
+
+ <para>
+ You must own the database object to use <command>SECURITY LABEL</command>.
+ </para>
</refsect1>
<refsect1>
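
The added paragraph makes the ownership requirement explicit. A minimal SQL sketch of what it means in practice, assuming the dummy_seclabel test provider from src/test/modules is installed (that provider is an assumption here, not part of this patch):

    CREATE ROLE label_bystander;
    CREATE TABLE labeled_tbl (a int);
    SECURITY LABEL FOR dummy ON TABLE labeled_tbl IS 'classified';    -- OK, run by the table owner
    SET ROLE label_bystander;
    SECURITY LABEL FOR dummy ON TABLE labeled_tbl IS 'unclassified';  -- fails: must own labeled_tbl
    RESET ROLE;
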
diff --git a/doc/src/sgml/release-18.sgml b/doc/src/sgml/release-18.sgml
index d7340465e79..19e770c65b5 100644
--- a/doc/src/sgml/release-18.sgml
+++ b/doc/src/sgml/release-18.sgml
@@ -194,6 +194,27 @@ These were previously zero-based.
</para>
</listitem>
+<!--
+Author: Peter Eisentraut <peter@eisentraut.org>
+2024-10-16 [04bec894a04] initdb: Change default to using data checksums.
+-->
+
+<listitem>
+<para>
+initdb defaults to enabling data checksums
+<ulink url="&commit_baseurl;04bec894a04">&sect;</ulink>
+</para>
+
+<para>
+The previous default behavior (checksums disabled) can be obtained using the
+new option --no-data-checksums. Note that pg_upgrade will reject upgrading
+between clusters with different checksum settings, so if the old cluster does
+not have checksums enabled (the previous default), then the new cluster will
+need to be initialized with --no-data-checksums in order to allow pg_upgrade
+to succeed.
+</para>
+</listitem>
+
</itemizedlist>
</sect2>
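
When planning an upgrade across this default change, an existing cluster's setting can be checked from SQL (data_checksums is a read-only parameter) before deciding whether the new cluster needs --no-data-checksums:

    SHOW data_checksums;   -- 'on' if the cluster was initialized with checksums, 'off' otherwise
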
diff --git a/doc/src/sgml/system-views.sgml b/doc/src/sgml/system-views.sgml
index b58c52ea50f..986ae1f543d 100644
--- a/doc/src/sgml/system-views.sgml
+++ b/doc/src/sgml/system-views.sgml
@@ -3932,7 +3932,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<structfield>passwd</structfield> <type>text</type>
</para>
<para>
- Password (possibly encrypted); null if none. See
+ Encrypted password; null if none. See
<link linkend="catalog-pg-authid"><structname>pg_authid</structname></link>
for details of how encrypted passwords are stored.
</para></entry>
diff --git a/doc/src/sgml/trigger.sgml b/doc/src/sgml/trigger.sgml
index e9214dcf1b1..bb1b5faf34e 100644
--- a/doc/src/sgml/trigger.sgml
+++ b/doc/src/sgml/trigger.sgml
@@ -129,10 +129,9 @@
In all cases, a trigger is executed as part of the same transaction as
the statement that triggered it, so if either the statement or the
trigger causes an error, the effects of both will be rolled back.
- Also, the trigger will always run in the security context of the role
- that executed the statement that caused the trigger to fire, unless
- the trigger function is defined as <literal>SECURITY DEFINER</literal>,
- in which case it will run as the function owner.
+ Also, the trigger will always run as the role that queued the trigger
+ event, unless the trigger function is marked as <literal>SECURITY
+ DEFINER</literal>, in which case it will run as the function owner.
</para>
<para>
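
The regression tests added to triggers.sql later in this patch exercise exactly this rule; in short, using the objects those tests create:

    BEGIN;
    SET ROLE regress_caller;
    INSERT INTO defer_trig VALUES (1);   -- queues the deferred AFTER trigger as regress_caller
    RESET ROLE;
    COMMIT;                              -- trigger fires now, still running as regress_caller
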
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 765659887af..03a1d7b027a 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -228,6 +228,8 @@ btgettuple(IndexScanDesc scan, ScanDirection dir)
BTScanOpaque so = (BTScanOpaque) scan->opaque;
bool res;
+ Assert(scan->heapRelation != NULL);
+
/* btree indexes are never lossy */
scan->xs_recheck = false;
@@ -289,6 +291,8 @@ btgetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
int64 ntids = 0;
ItemPointer heapTid;
+ Assert(scan->heapRelation == NULL);
+
/* Each loop iteration performs another primitive index scan */
do
{
@@ -393,6 +397,32 @@ btrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
BTScanPosInvalidate(so->currPos);
}
+ /*
+ * We prefer to eagerly drop leaf page pins before btgettuple returns.
+ * This avoids making VACUUM wait to acquire a cleanup lock on the page.
+ *
+ * We cannot safely drop leaf page pins during index-only scans due to a
+ * race condition involving VACUUM setting pages all-visible in the VM.
+ * It's also unsafe for plain index scans that use a non-MVCC snapshot.
+ *
+ * When we drop pins eagerly, the mechanism that marks so->killedItems[]
+ * index tuples LP_DEAD has to deal with concurrent TID recycling races.
+ * The scheme used to detect unsafe TID recycling won't work when scanning
+ * unlogged relations (since it involves saving an affected page's LSN).
+ * Opt out of eager pin dropping during unlogged relation scans for now
+ * (this is preferable to opting out of kill_prior_tuple LP_DEAD setting).
+ *
+ * Also opt out of dropping leaf page pins eagerly during bitmap scans.
+ * Pins cannot be held for more than an instant during bitmap scans either
+ * way, so we might as well avoid wasting cycles on acquiring page LSNs.
+ *
+ * See nbtree/README section on making concurrent TID recycling safe.
+ */
+ so->dropPin = (!scan->xs_want_itup &&
+ IsMVCCSnapshot(scan->xs_snapshot) &&
+ RelationNeedsWAL(scan->indexRelation) &&
+ scan->heapRelation != NULL);
+
so->markItemIndex = -1;
so->needPrimScan = false;
so->scanBehind = false;
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index fe9a3886913..070f14c8b91 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -25,7 +25,7 @@
#include "utils/rel.h"
-static void _bt_drop_lock_and_maybe_pin(IndexScanDesc scan, BTScanPos sp);
+static inline void _bt_drop_lock_and_maybe_pin(Relation rel, BTScanOpaque so);
static Buffer _bt_moveright(Relation rel, Relation heaprel, BTScanInsert key,
Buffer buf, bool forupdate, BTStack stack,
int access);
@@ -57,24 +57,29 @@ static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
/*
* _bt_drop_lock_and_maybe_pin()
*
- * Unlock the buffer; and if it is safe to release the pin, do that, too.
- * This will prevent vacuum from stalling in a blocked state trying to read a
- * page when a cursor is sitting on it.
- *
- * See nbtree/README section on making concurrent TID recycling safe.
+ * Unlock so->currPos.buf. If scan is so->dropPin, drop the pin, too.
+ * Dropping the pin prevents VACUUM from blocking on acquiring a cleanup lock.
*/
-static void
-_bt_drop_lock_and_maybe_pin(IndexScanDesc scan, BTScanPos sp)
+static inline void
+_bt_drop_lock_and_maybe_pin(Relation rel, BTScanOpaque so)
{
- _bt_unlockbuf(scan->indexRelation, sp->buf);
-
- if (IsMVCCSnapshot(scan->xs_snapshot) &&
- RelationNeedsWAL(scan->indexRelation) &&
- !scan->xs_want_itup)
+ if (!so->dropPin)
{
- ReleaseBuffer(sp->buf);
- sp->buf = InvalidBuffer;
+ /* Just drop the lock (not the pin) */
+ _bt_unlockbuf(rel, so->currPos.buf);
+ return;
}
+
+ /*
+ * Drop both the lock and the pin.
+ *
+ * Have to set so->currPos.lsn so that _bt_killitems has a way to detect
+ * when concurrent heap TID recycling by VACUUM might have taken place.
+ */
+ Assert(RelationNeedsWAL(rel));
+ so->currPos.lsn = BufferGetLSNAtomic(so->currPos.buf);
+ _bt_relbuf(rel, so->currPos.buf);
+ so->currPos.buf = InvalidBuffer;
}
/*
@@ -866,8 +871,8 @@ _bt_compare(Relation rel,
* if backwards scan, the last item) in the tree that satisfies the
* qualifications in the scan key. On success exit, data about the
* matching tuple(s) on the page has been loaded into so->currPos. We'll
- * drop all locks and hold onto a pin on page's buffer, except when
- * _bt_drop_lock_and_maybe_pin dropped the pin to avoid blocking VACUUM.
+ * drop all locks and hold onto a pin on page's buffer, except during
+ * so->dropPin scans, when we drop both the lock and the pin.
* _bt_returnitem sets the next item to return to scan on success exit.
*
* If there are no matching items in the index, we return false, with no
@@ -1610,7 +1615,13 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum,
so->currPos.currPage = BufferGetBlockNumber(so->currPos.buf);
so->currPos.prevPage = opaque->btpo_prev;
so->currPos.nextPage = opaque->btpo_next;
+ /* delay setting so->currPos.lsn until _bt_drop_lock_and_maybe_pin */
+ so->currPos.dir = dir;
+ so->currPos.nextTupleOffset = 0;
+ /* either moreRight or moreLeft should be set now (may be unset later) */
+ Assert(ScanDirectionIsForward(dir) ? so->currPos.moreRight :
+ so->currPos.moreLeft);
Assert(!P_IGNORE(opaque));
Assert(BTScanPosIsPinned(so->currPos));
Assert(!so->needPrimScan);
@@ -1626,14 +1637,6 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum,
so->currPos.currPage);
}
- /* initialize remaining currPos fields related to current page */
- so->currPos.lsn = BufferGetLSNAtomic(so->currPos.buf);
- so->currPos.dir = dir;
- so->currPos.nextTupleOffset = 0;
- /* either moreLeft or moreRight should be set now (may be unset later) */
- Assert(ScanDirectionIsForward(dir) ? so->currPos.moreRight :
- so->currPos.moreLeft);
-
PredicateLockPage(rel, so->currPos.currPage, scan->xs_snapshot);
/* initialize local variables */
@@ -2107,10 +2110,9 @@ _bt_returnitem(IndexScanDesc scan, BTScanOpaque so)
*
* Wrapper on _bt_readnextpage that performs final steps for the current page.
*
- * On entry, if so->currPos.buf is valid the buffer is pinned but not locked.
- * If there's no pin held, it's because _bt_drop_lock_and_maybe_pin dropped
- * the pin eagerly earlier on. The scan must have so->currPos.currPage set to
- * a valid block, in any case.
+ * On entry, so->currPos must be valid. Its buffer will be pinned, though
+ * never locked. (Actually, when so->dropPin there won't even be a pin held,
+ * though so->currPos.currPage must still be set to a valid block number.)
*/
static bool
_bt_steppage(IndexScanDesc scan, ScanDirection dir)
@@ -2251,12 +2253,14 @@ _bt_readfirstpage(IndexScanDesc scan, OffsetNumber offnum, ScanDirection dir)
*/
if (_bt_readpage(scan, dir, offnum, true))
{
+ Relation rel = scan->indexRelation;
+
/*
* _bt_readpage succeeded. Drop the lock (and maybe the pin) on
* so->currPos.buf in preparation for btgettuple returning tuples.
*/
Assert(BTScanPosIsPinned(so->currPos));
- _bt_drop_lock_and_maybe_pin(scan, &so->currPos);
+ _bt_drop_lock_and_maybe_pin(rel, so);
return true;
}
@@ -2294,8 +2298,8 @@ _bt_readfirstpage(IndexScanDesc scan, OffsetNumber offnum, ScanDirection dir)
*
* On success exit, so->currPos is updated to contain data from the next
* interesting page, and we return true. We hold a pin on the buffer on
- * success exit, except when _bt_drop_lock_and_maybe_pin decided it was safe
- * to eagerly drop the pin (to avoid blocking VACUUM).
+ * success exit (except during so->dropPin index scans, when we drop the pin
+ * eagerly to avoid blocking VACUUM).
*
* If there are no more matching records in the given direction, we drop all
* locks and pins, invalidate so->currPos, and return false.
@@ -2413,7 +2417,7 @@ _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno,
*/
Assert(so->currPos.currPage == blkno);
Assert(BTScanPosIsPinned(so->currPos));
- _bt_drop_lock_and_maybe_pin(scan, &so->currPos);
+ _bt_drop_lock_and_maybe_pin(rel, so);
return true;
}
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 1a15dfcb7d3..29f0dca1b08 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -63,7 +63,7 @@ static bool _bt_check_compare(IndexScanDesc scan, ScanDirection dir,
bool *continuescan, int *ikey);
static bool _bt_check_rowcompare(ScanKey skey,
IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
- ScanDirection dir, bool forcenonrequired, bool *continuescan);
+ ScanDirection dir, bool *continuescan);
static void _bt_checkkeys_look_ahead(IndexScanDesc scan, BTReadPageState *pstate,
int tupnatts, TupleDesc tupdesc);
static int _bt_keep_natts(Relation rel, IndexTuple lastleft,
@@ -2902,8 +2902,10 @@ _bt_check_compare(IndexScanDesc scan, ScanDirection dir,
/* row-comparison keys need special processing */
if (key->sk_flags & SK_ROW_HEADER)
{
+ Assert(!forcenonrequired); /* forbidden by _bt_set_startikey */
+
if (_bt_check_rowcompare(key, tuple, tupnatts, tupdesc, dir,
- forcenonrequired, continuescan))
+ continuescan))
continue;
return false;
}
@@ -3060,8 +3062,7 @@ _bt_check_compare(IndexScanDesc scan, ScanDirection dir,
*/
static bool
_bt_check_rowcompare(ScanKey skey, IndexTuple tuple, int tupnatts,
- TupleDesc tupdesc, ScanDirection dir,
- bool forcenonrequired, bool *continuescan)
+ TupleDesc tupdesc, ScanDirection dir, bool *continuescan)
{
ScanKey subkey = (ScanKey) DatumGetPointer(skey->sk_argument);
int32 cmpresult = 0;
@@ -3101,11 +3102,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, int tupnatts,
if (isNull)
{
- if (forcenonrequired)
- {
- /* treating scan's keys as non-required */
- }
- else if (subkey->sk_flags & SK_BT_NULLS_FIRST)
+ if (subkey->sk_flags & SK_BT_NULLS_FIRST)
{
/*
* Since NULLs are sorted before non-NULLs, we know we have
@@ -3159,12 +3156,8 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, int tupnatts,
*/
Assert(subkey != (ScanKey) DatumGetPointer(skey->sk_argument));
subkey--;
- if (forcenonrequired)
- {
- /* treating scan's keys as non-required */
- }
- else if ((subkey->sk_flags & SK_BT_REQFWD) &&
- ScanDirectionIsForward(dir))
+ if ((subkey->sk_flags & SK_BT_REQFWD) &&
+ ScanDirectionIsForward(dir))
*continuescan = false;
else if ((subkey->sk_flags & SK_BT_REQBKWD) &&
ScanDirectionIsBackward(dir))
@@ -3216,7 +3209,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, int tupnatts,
break;
}
- if (!result && !forcenonrequired)
+ if (!result)
{
/*
* Tuple fails this qual. If it's a required qual for the current
@@ -3342,75 +3335,71 @@ _bt_checkkeys_look_ahead(IndexScanDesc scan, BTReadPageState *pstate,
*
* Note that if we hold a pin on the target page continuously from initially
* reading the items until applying this function, VACUUM cannot have deleted
- * any items from the page, and so there is no need to search left from the
- * recorded offset. (This observation also guarantees that the item is still
- * the right one to delete, which might otherwise be questionable since heap
- * TIDs can get recycled.) This holds true even if the page has been modified
- * by inserts and page splits, so there is no need to consult the LSN.
- *
- * If the pin was released after reading the page, then we re-read it. If it
- * has been modified since we read it (as determined by the LSN), we dare not
- * flag any entries because it is possible that the old entry was vacuumed
- * away and the TID was re-used by a completely different heap tuple.
+ * any items on the page, so the page's TIDs can't have been recycled by now.
+ * There's no risk that we'll confuse a new index tuple that happens to use a
+ * recycled TID with a now-removed tuple with the same TID (that used to be on
+ * this same page). We can't rely on that during scans that drop pins eagerly
+ * (so->dropPin scans), though, so we must condition setting LP_DEAD bits on
+ * the page LSN having not changed since back when _bt_readpage saw the page.
*/
void
_bt_killitems(IndexScanDesc scan)
{
+ Relation rel = scan->indexRelation;
BTScanOpaque so = (BTScanOpaque) scan->opaque;
Page page;
BTPageOpaque opaque;
OffsetNumber minoff;
OffsetNumber maxoff;
- int i;
int numKilled = so->numKilled;
bool killedsomething = false;
- bool droppedpin PG_USED_FOR_ASSERTS_ONLY;
+ Assert(numKilled > 0);
Assert(BTScanPosIsValid(so->currPos));
+ Assert(scan->heapRelation != NULL); /* can't be a bitmap index scan */
- /*
- * Always reset the scan state, so we don't look for same items on other
- * pages.
- */
+ /* Always invalidate so->killedItems[] before leaving so->currPos */
so->numKilled = 0;
- if (BTScanPosIsPinned(so->currPos))
+ if (!so->dropPin)
{
/*
* We have held the pin on this page since we read the index tuples,
* so all we need to do is lock it. The pin will have prevented
- * re-use of any TID on the page, so there is no need to check the
- * LSN.
+ * concurrent VACUUMs from recycling any of the TIDs on the page.
*/
- droppedpin = false;
- _bt_lockbuf(scan->indexRelation, so->currPos.buf, BT_READ);
-
- page = BufferGetPage(so->currPos.buf);
+ Assert(BTScanPosIsPinned(so->currPos));
+ _bt_lockbuf(rel, so->currPos.buf, BT_READ);
}
else
{
Buffer buf;
+ XLogRecPtr latestlsn;
- droppedpin = true;
- /* Attempt to re-read the buffer, getting pin and lock. */
- buf = _bt_getbuf(scan->indexRelation, so->currPos.currPage, BT_READ);
+ Assert(!BTScanPosIsPinned(so->currPos));
+ Assert(RelationNeedsWAL(rel));
+ buf = _bt_getbuf(rel, so->currPos.currPage, BT_READ);
- page = BufferGetPage(buf);
- if (BufferGetLSNAtomic(buf) == so->currPos.lsn)
- so->currPos.buf = buf;
- else
+ latestlsn = BufferGetLSNAtomic(buf);
+ Assert(!XLogRecPtrIsInvalid(so->currPos.lsn));
+ Assert(so->currPos.lsn <= latestlsn);
+ if (so->currPos.lsn != latestlsn)
{
- /* Modified while not pinned means hinting is not safe. */
- _bt_relbuf(scan->indexRelation, buf);
+ /* Modified, give up on hinting */
+ _bt_relbuf(rel, buf);
return;
}
+
+ /* Unmodified, hinting is safe */
+ so->currPos.buf = buf;
}
+ page = BufferGetPage(so->currPos.buf);
opaque = BTPageGetOpaque(page);
minoff = P_FIRSTDATAKEY(opaque);
maxoff = PageGetMaxOffsetNumber(page);
- for (i = 0; i < numKilled; i++)
+ for (int i = 0; i < numKilled; i++)
{
int itemIndex = so->killedItems[i];
BTScanPosItem *kitem = &so->currPos.items[itemIndex];
@@ -3442,7 +3431,7 @@ _bt_killitems(IndexScanDesc scan)
* correctness.
*
* Note that the page may have been modified in almost any way
- * since we first read it (in the !droppedpin case), so it's
+ * since we first read it (in the !so->dropPin case), so it's
* possible that this posting list tuple wasn't a posting list
* tuple when we first encountered its heap TIDs.
*/
@@ -3458,7 +3447,7 @@ _bt_killitems(IndexScanDesc scan)
* though only in the common case where the page can't
* have been concurrently modified
*/
- Assert(kitem->indexOffset == offnum || !droppedpin);
+ Assert(kitem->indexOffset == offnum || !so->dropPin);
/*
* Read-ahead to later kitems here.
@@ -3525,7 +3514,7 @@ _bt_killitems(IndexScanDesc scan)
MarkBufferDirtyHint(so->currPos.buf, true);
}
- _bt_unlockbuf(scan->indexRelation, so->currPos.buf);
+ _bt_unlockbuf(rel, so->currPos.buf);
}
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 5fbbcdaabb1..c95eb945016 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -1065,16 +1065,41 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
/* Check that the chosen locales are valid, and get canonical spellings */
if (!check_locale(LC_COLLATE, dbcollate, &canonname))
- ereport(ERROR,
- (errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("invalid LC_COLLATE locale name: \"%s\"", dbcollate),
- errhint("If the locale name is specific to ICU, use ICU_LOCALE.")));
+ {
+ if (dblocprovider == COLLPROVIDER_BUILTIN)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("invalid LC_COLLATE locale name: \"%s\"", dbcollate),
+ errhint("If the locale name is specific to the builtin provider, use BUILTIN_LOCALE.")));
+ else if (dblocprovider == COLLPROVIDER_ICU)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("invalid LC_COLLATE locale name: \"%s\"", dbcollate),
+ errhint("If the locale name is specific to the ICU provider, use ICU_LOCALE.")));
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("invalid LC_COLLATE locale name: \"%s\"", dbcollate)));
+ }
dbcollate = canonname;
if (!check_locale(LC_CTYPE, dbctype, &canonname))
- ereport(ERROR,
- (errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("invalid LC_CTYPE locale name: \"%s\"", dbctype),
- errhint("If the locale name is specific to ICU, use ICU_LOCALE.")));
+ {
+ if (dblocprovider == COLLPROVIDER_BUILTIN)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("invalid LC_CTYPE locale name: \"%s\"", dbctype),
+ errhint("If the locale name is specific to the builtin provider, use BUILTIN_LOCALE.")));
+ else if (dblocprovider == COLLPROVIDER_ICU)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("invalid LC_CTYPE locale name: \"%s\"", dbctype),
+ errhint("If the locale name is specific to the ICU provider, use ICU_LOCALE.")));
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("invalid LC_CTYPE locale name: \"%s\"", dbctype)));
+ }
+
dbctype = canonname;
check_encoding_locale_matches(encoding, dbcollate, dbctype);
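
An illustrative session showing the provider-specific hint (hypothetical: whether a given name passes the libc check is platform-dependent, so the exact output may differ):

    CREATE DATABASE icu_db LOCALE_PROVIDER icu ICU_LOCALE 'und'
        LC_COLLATE 'und-x-icu' LC_CTYPE 'und-x-icu' TEMPLATE template0;
    -- ERROR:  invalid LC_COLLATE locale name: "und-x-icu"
    -- HINT:  If the locale name is specific to the ICU provider, use ICU_LOCALE.
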
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index acf11e83c04..ea96947d813 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -430,8 +430,8 @@ static void AlterConstrUpdateConstraintEntry(ATAlterConstraint *cmdcon, Relation
static ObjectAddress ATExecValidateConstraint(List **wqueue,
Relation rel, char *constrName,
bool recurse, bool recursing, LOCKMODE lockmode);
-static void QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation rel,
- HeapTuple contuple, LOCKMODE lockmode);
+static void QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation fkrel,
+ Oid pkrelid, HeapTuple contuple, LOCKMODE lockmode);
static void QueueCheckConstraintValidation(List **wqueue, Relation conrel, Relation rel,
char *constrName, HeapTuple contuple,
bool recurse, bool recursing, LOCKMODE lockmode);
@@ -11858,6 +11858,7 @@ AttachPartitionForeignKey(List **wqueue,
if (queueValidation)
{
Relation conrel;
+ Oid confrelid;
conrel = table_open(ConstraintRelationId, RowExclusiveLock);
@@ -11865,9 +11866,11 @@ AttachPartitionForeignKey(List **wqueue,
if (!HeapTupleIsValid(partcontup))
elog(ERROR, "cache lookup failed for constraint %u", partConstrOid);
+ confrelid = ((Form_pg_constraint) GETSTRUCT(partcontup))->confrelid;
+
/* Use the same lock as for AT_ValidateConstraint */
- QueueFKConstraintValidation(wqueue, conrel, partition, partcontup,
- ShareUpdateExclusiveLock);
+ QueueFKConstraintValidation(wqueue, conrel, partition, confrelid,
+ partcontup, ShareUpdateExclusiveLock);
ReleaseSysCache(partcontup);
table_close(conrel, RowExclusiveLock);
}
@@ -12463,9 +12466,12 @@ ATExecAlterConstrEnforceability(List **wqueue, ATAlterConstraint *cmdcon,
/*
* Tell Phase 3 to check that the constraint is satisfied by existing
- * rows.
+ * rows. Only applies to leaf partitions, and (for constraints that
+ * reference a partitioned table) only if this is not one of the
+ * pg_constraint rows that exist solely to support action triggers.
*/
- if (rel->rd_rel->relkind == RELKIND_RELATION)
+ if (rel->rd_rel->relkind == RELKIND_RELATION &&
+ currcon->confrelid == pkrelid)
{
AlteredTableInfo *tab;
NewConstraint *newcon;
@@ -12919,7 +12925,8 @@ ATExecValidateConstraint(List **wqueue, Relation rel, char *constrName,
{
if (con->contype == CONSTRAINT_FOREIGN)
{
- QueueFKConstraintValidation(wqueue, conrel, rel, tuple, lockmode);
+ QueueFKConstraintValidation(wqueue, conrel, rel, con->confrelid,
+ tuple, lockmode);
}
else if (con->contype == CONSTRAINT_CHECK)
{
@@ -12952,8 +12959,8 @@ ATExecValidateConstraint(List **wqueue, Relation rel, char *constrName,
* for the specified relation and all its children.
*/
static void
-QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation rel,
- HeapTuple contuple, LOCKMODE lockmode)
+QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation fkrel,
+ Oid pkrelid, HeapTuple contuple, LOCKMODE lockmode)
{
Form_pg_constraint con;
AlteredTableInfo *tab;
@@ -12964,7 +12971,17 @@ QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation rel,
Assert(con->contype == CONSTRAINT_FOREIGN);
Assert(!con->convalidated);
- if (rel->rd_rel->relkind == RELKIND_RELATION)
+ /*
+ * Add the validation to phase 3's queue; not needed for partitioned
+ * tables themselves, only for their partitions.
+ *
+ * When the referenced table (pkrelid) is partitioned, the referencing
+ * table (fkrel) has one pg_constraint row pointing to each partition
+ * thereof. These rows exist only to support action triggers, so no table
+ * scan is needed; skip this for them as well.
+ */
+ if (fkrel->rd_rel->relkind == RELKIND_RELATION &&
+ con->confrelid == pkrelid)
{
NewConstraint *newcon;
Constraint *fkconstraint;
@@ -12983,15 +13000,16 @@ QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation rel,
newcon->qual = (Node *) fkconstraint;
/* Find or create work queue entry for this table */
- tab = ATGetQueueEntry(wqueue, rel);
+ tab = ATGetQueueEntry(wqueue, fkrel);
tab->constraints = lappend(tab->constraints, newcon);
}
/*
* If the table at either end of the constraint is partitioned, we need to
- * recurse and handle every constraint that is a child of this constraint.
+ * recurse and handle every unvalidated constraint that is a child of this
+ * constraint.
*/
- if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ||
+ if (fkrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ||
get_rel_relkind(con->confrelid) == RELKIND_PARTITIONED_TABLE)
{
ScanKeyData pkey;
@@ -13023,8 +13041,12 @@ QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation rel,
childrel = table_open(childcon->conrelid, lockmode);
- QueueFKConstraintValidation(wqueue, conrel, childrel, childtup,
- lockmode);
+ /*
+ * Note that pkrelid should be passed as-is during recursion, as it is
+ * required to identify the root referenced table.
+ */
+ QueueFKConstraintValidation(wqueue, conrel, childrel, pkrelid,
+ childtup, lockmode);
table_close(childrel, NoLock);
}
@@ -13032,7 +13054,11 @@ QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation rel,
}
/*
- * Now update the catalog, while we have the door open.
+ * Now mark the pg_constraint row as validated (even if we didn't check,
+ * notably the ones for partitions on the referenced side).
+ *
+ * We rely on transaction abort to roll back this change if phase 3
+ * ultimately finds violating rows. This is a bit ugly.
*/
copyTuple = heap_copytuple(contuple);
copy_con = (Form_pg_constraint) GETSTRUCT(copyTuple);
diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c
index 10677da56b2..1c3c051403d 100644
--- a/src/backend/replication/logical/launcher.c
+++ b/src/backend/replication/logical/launcher.c
@@ -1016,7 +1016,7 @@ logicalrep_launcher_attach_dshmem(void)
last_start_times_dsa = dsa_attach(LogicalRepCtx->last_start_dsa);
dsa_pin_mapping(last_start_times_dsa);
last_start_times = dshash_attach(last_start_times_dsa, &dsh_params,
- LogicalRepCtx->last_start_dsh, 0);
+ LogicalRepCtx->last_start_dsh, NULL);
}
MemoryContextSwitchTo(oldcontext);
diff --git a/src/backend/storage/aio/method_io_uring.c b/src/backend/storage/aio/method_io_uring.c
index c719ba2727a..cc312b641ca 100644
--- a/src/backend/storage/aio/method_io_uring.c
+++ b/src/backend/storage/aio/method_io_uring.c
@@ -126,7 +126,7 @@ pgaio_uring_shmem_size(void)
static void
pgaio_uring_shmem_init(bool first_time)
{
- int TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS - MAX_IO_WORKERS;
+ int TotalProcs = pgaio_uring_procs();
bool found;
pgaio_uring_contexts = (PgAioUringContext *)
diff --git a/src/backend/utils/activity/pgstat_shmem.c b/src/backend/utils/activity/pgstat_shmem.c
index 2e33293b000..53e7d534270 100644
--- a/src/backend/utils/activity/pgstat_shmem.c
+++ b/src/backend/utils/activity/pgstat_shmem.c
@@ -183,7 +183,7 @@ StatsShmemInit(void)
p += MAXALIGN(pgstat_dsa_init_size());
dsa = dsa_create_in_place(ctl->raw_dsa_area,
pgstat_dsa_init_size(),
- LWTRANCHE_PGSTATS_DSA, 0);
+ LWTRANCHE_PGSTATS_DSA, NULL);
dsa_pin(dsa);
/*
@@ -255,7 +255,8 @@ pgstat_attach_shmem(void)
dsa_pin_mapping(pgStatLocal.dsa);
pgStatLocal.shared_hash = dshash_attach(pgStatLocal.dsa, &dsh_params,
- pgStatLocal.shmem->hash_handle, 0);
+ pgStatLocal.shmem->hash_handle,
+ NULL);
MemoryContextSwitchTo(oldcontext);
}
diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c
index 603632581d0..4bb84ff7087 100644
--- a/src/backend/utils/fmgr/dfmgr.c
+++ b/src/backend/utils/fmgr/dfmgr.c
@@ -99,6 +99,14 @@ load_external_function(const char *filename, const char *funcname,
void *lib_handle;
void *retval;
+ /*
+ * If the value starts with "$libdir/", strip that. This is because many
+ * extensions have hardcoded '$libdir/foo' as their library name, which
+ * prevents using the path.
+ */
+ if (strncmp(filename, "$libdir/", 8) == 0)
+ filename += 8;
+
/* Expand the possibly-abbreviated filename to an exact path name */
fullname = expand_dynamic_library_name(filename);
@@ -456,14 +464,6 @@ expand_dynamic_library_name(const char *name)
Assert(name);
- /*
- * If the value starts with "$libdir/", strip that. This is because many
- * extensions have hardcoded '$libdir/foo' as their library name, which
- * prevents using the path.
- */
- if (strncmp(name, "$libdir/", 8) == 0)
- name += 8;
-
have_slash = (first_dir_separator(name) != NULL);
if (!have_slash)
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index 3e4e444f3fd..47352b7faed 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -1867,6 +1867,21 @@ ExecQueryAndProcessResults(const char *query,
{
FILE *copy_stream = NULL;
+ if (pset.piped_syncs > 1)
+ {
+ /*
+ * When reading COPY data, the backend ignores sync messages
+ * and will not send a matching ReadyForQuery response. Even
+ * if we adjust piped_syncs and requested_results, it is not
+ * possible to salvage this as the sync message would still be
+ * in libpq's command queue and we would be stuck in a busy
+ * pipeline state. Thus, we abort the connection to avoid
+ * this state.
+ */
+ pg_log_info("\\syncpipeline after COPY is not supported, aborting connection");
+ exit(EXIT_BADCONN);
+ }
+
/*
* For COPY OUT, direct the output to the default place (probably
* a pager pipe) for \watch, or to pset.copyStream for \copy,
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index 1d08268393e..24e0100c9f0 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -6188,8 +6188,8 @@ listExtensions(const char *pattern)
"FROM pg_catalog.pg_extension e "
"LEFT JOIN pg_catalog.pg_namespace n ON n.oid = e.extnamespace "
"LEFT JOIN pg_catalog.pg_description d ON d.objoid = e.oid "
- "LEFT JOIN pg_catalog.pg_available_extensions() ae(name, default_version, comment) ON ae.name = e.extname "
- "AND d.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass\n",
+ "AND d.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass "
+ "LEFT JOIN pg_catalog.pg_available_extensions() ae(name, default_version, comment) ON ae.name = e.extname\n",
gettext_noop("Name"),
gettext_noop("Version"),
gettext_noop("Default version"),
diff --git a/src/bin/psql/t/001_basic.pl b/src/bin/psql/t/001_basic.pl
index 4050f9a5e3e..ae5c1d66405 100644
--- a/src/bin/psql/t/001_basic.pl
+++ b/src/bin/psql/t/001_basic.pl
@@ -513,15 +513,33 @@ SELECT 'val1' \\bind \\sendpipeline
qr/server closed the connection unexpectedly/,
'protocol sync loss in pipeline: bind COPY, SELECT, sync and getresult');
-# This time, test without the \getresults.
+# This time, test without the \getresults and \syncpipeline.
psql_fails_like(
$node,
qq{\\startpipeline
COPY psql_pipeline FROM STDIN;
SELECT 'val1';
-\\syncpipeline
\\endpipeline},
qr/server closed the connection unexpectedly/,
'protocol sync loss in pipeline: COPY, SELECT and sync');
+# Tests sending a sync after a COPY TO/FROM. These abort the connection
+# from the frontend.
+psql_fails_like(
+ $node,
+ qq{\\startpipeline
+COPY psql_pipeline FROM STDIN;
+\\syncpipeline
+\\endpipeline},
+ qr/\\syncpipeline after COPY is not supported, aborting connection/,
+ 'sending sync after COPY FROM');
+psql_fails_like(
+ $node,
+ qq{\\startpipeline
+COPY psql_pipeline TO STDOUT;
+\\syncpipeline
+\\endpipeline},
+ qr/\\syncpipeline after COPY is not supported, aborting connection/,
+ 'sending sync after COPY TO');
+
done_testing();
diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h
index ebca02588d3..e709d2e0afe 100644
--- a/src/include/access/nbtree.h
+++ b/src/include/access/nbtree.h
@@ -939,7 +939,7 @@ typedef BTVacuumPostingData *BTVacuumPosting;
* processing. This approach minimizes lock/unlock traffic. We must always
* drop the lock to make it okay for caller to process the returned items.
* Whether or not we can also release the pin during this window will vary.
- * We drop the pin eagerly (when safe) to avoid blocking progress by VACUUM
+ * We drop the pin (when so->dropPin) to avoid blocking progress by VACUUM
* (see nbtree/README section about making concurrent TID recycling safe).
* We'll always release both the lock and the pin on the current page before
* moving on to its sibling page.
@@ -967,7 +967,7 @@ typedef struct BTScanPosData
BlockNumber currPage; /* page referenced by items array */
BlockNumber prevPage; /* currPage's left link */
BlockNumber nextPage; /* currPage's right link */
- XLogRecPtr lsn; /* currPage's LSN */
+ XLogRecPtr lsn; /* currPage's LSN (when so->dropPin) */
/* scan direction for the saved position's call to _bt_readpage */
ScanDirection dir;
@@ -1070,6 +1070,7 @@ typedef struct BTScanOpaqueData
/* info about killed items if any (killedItems is NULL if never used) */
int *killedItems; /* currPos.items indexes of killed items */
int numKilled; /* number of currently stored items */
+ bool dropPin; /* drop leaf pin before btgettuple returns? */
/*
* If we are doing an index-only scan, these are the tuple storage
diff --git a/src/test/modules/test_dsm_registry/test_dsm_registry.c b/src/test/modules/test_dsm_registry/test_dsm_registry.c
index 462a80f8790..96a890be228 100644
--- a/src/test/modules/test_dsm_registry/test_dsm_registry.c
+++ b/src/test/modules/test_dsm_registry/test_dsm_registry.c
@@ -54,7 +54,7 @@ set_val_in_shmem(PG_FUNCTION_ARGS)
tdr_attach_shmem();
LWLockAcquire(&tdr_state->lck, LW_EXCLUSIVE);
- tdr_state->val = PG_GETARG_UINT32(0);
+ tdr_state->val = PG_GETARG_INT32(0);
LWLockRelease(&tdr_state->lck);
PG_RETURN_VOID();
@@ -72,5 +72,5 @@ get_val_in_shmem(PG_FUNCTION_ARGS)
ret = tdr_state->val;
LWLockRelease(&tdr_state->lck);
- PG_RETURN_UINT32(ret);
+ PG_RETURN_INT32(ret);
}
diff --git a/src/test/regress/expected/foreign_key.out b/src/test/regress/expected/foreign_key.out
index 4f3f280a439..6a8f3959345 100644
--- a/src/test/regress/expected/foreign_key.out
+++ b/src/test/regress/expected/foreign_key.out
@@ -1895,29 +1895,76 @@ WHERE conrelid::regclass::text like 'fk_partitioned_fk%' ORDER BY oid::regclass:
(5 rows)
DROP TABLE fk_partitioned_fk, fk_notpartitioned_pk;
--- NOT VALID foreign key on a non-partitioned table referencing a partitioned table
+-- NOT VALID and NOT ENFORCED foreign key on a non-partitioned table
+-- referencing a partitioned table
CREATE TABLE fk_partitioned_pk (a int, b int, PRIMARY KEY (a, b)) PARTITION BY RANGE (a, b);
CREATE TABLE fk_partitioned_pk_1 PARTITION OF fk_partitioned_pk FOR VALUES FROM (0,0) TO (1000,1000);
+CREATE TABLE fk_partitioned_pk_2 PARTITION OF fk_partitioned_pk FOR VALUES FROM (1000,1000) TO (2000,2000);
CREATE TABLE fk_notpartitioned_fk (b int, a int);
-ALTER TABLE fk_notpartitioned_fk ADD FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT VALID;
--- Constraint will be invalid.
-SELECT conname, convalidated FROM pg_constraint
+INSERT INTO fk_partitioned_pk VALUES(100,100), (1000,1000);
+INSERT INTO fk_notpartitioned_fk VALUES(100,100), (1000,1000);
+ALTER TABLE fk_notpartitioned_fk ADD CONSTRAINT fk_notpartitioned_fk_a_b_fkey
+ FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT VALID;
+ALTER TABLE fk_notpartitioned_fk ADD CONSTRAINT fk_notpartitioned_fk_a_b_fkey2
+ FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT ENFORCED;
+-- All constraints will be invalid, and _fkey2 constraints will not be enforced.
+SELECT conname, conenforced, convalidated FROM pg_constraint
WHERE conrelid = 'fk_notpartitioned_fk'::regclass ORDER BY oid::regclass::text;
- conname | convalidated
----------------------------------+--------------
- fk_notpartitioned_fk_a_b_fkey | f
- fk_notpartitioned_fk_a_b_fkey_1 | f
-(2 rows)
+ conname | conenforced | convalidated
+----------------------------------+-------------+--------------
+ fk_notpartitioned_fk_a_b_fkey | t | f
+ fk_notpartitioned_fk_a_b_fkey_1 | t | f
+ fk_notpartitioned_fk_a_b_fkey_2 | t | f
+ fk_notpartitioned_fk_a_b_fkey2 | f | f
+ fk_notpartitioned_fk_a_b_fkey2_1 | f | f
+ fk_notpartitioned_fk_a_b_fkey2_2 | f | f
+(6 rows)
ALTER TABLE fk_notpartitioned_fk VALIDATE CONSTRAINT fk_notpartitioned_fk_a_b_fkey;
--- All constraints are now valid.
-SELECT conname, convalidated FROM pg_constraint
+ALTER TABLE fk_notpartitioned_fk ALTER CONSTRAINT fk_notpartitioned_fk_a_b_fkey2 ENFORCED;
+-- All constraints are now valid and enforced.
+SELECT conname, conenforced, convalidated FROM pg_constraint
WHERE conrelid = 'fk_notpartitioned_fk'::regclass ORDER BY oid::regclass::text;
- conname | convalidated
----------------------------------+--------------
- fk_notpartitioned_fk_a_b_fkey | t
- fk_notpartitioned_fk_a_b_fkey_1 | t
-(2 rows)
+ conname | conenforced | convalidated
+----------------------------------+-------------+--------------
+ fk_notpartitioned_fk_a_b_fkey | t | t
+ fk_notpartitioned_fk_a_b_fkey_1 | t | t
+ fk_notpartitioned_fk_a_b_fkey_2 | t | t
+ fk_notpartitioned_fk_a_b_fkey2 | t | t
+ fk_notpartitioned_fk_a_b_fkey2_1 | t | t
+ fk_notpartitioned_fk_a_b_fkey2_2 | t | t
+(6 rows)
+
+-- test a self-referential FK
+ALTER TABLE fk_partitioned_pk ADD CONSTRAINT selffk FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT VALID;
+CREATE TABLE fk_partitioned_pk_3 PARTITION OF fk_partitioned_pk FOR VALUES FROM (2000,2000) TO (3000,3000)
+ PARTITION BY RANGE (a);
+CREATE TABLE fk_partitioned_pk_3_1 PARTITION OF fk_partitioned_pk_3 FOR VALUES FROM (2000) TO (2100);
+SELECT conname, conenforced, convalidated FROM pg_constraint
+WHERE conrelid = 'fk_partitioned_pk'::regclass AND contype = 'f'
+ORDER BY oid::regclass::text;
+ conname | conenforced | convalidated
+------------+-------------+--------------
+ selffk | t | f
+ selffk_1 | t | f
+ selffk_2 | t | f
+ selffk_3 | t | f
+ selffk_3_1 | t | f
+(5 rows)
+
+ALTER TABLE fk_partitioned_pk_2 VALIDATE CONSTRAINT selffk;
+ALTER TABLE fk_partitioned_pk VALIDATE CONSTRAINT selffk;
+SELECT conname, conenforced, convalidated FROM pg_constraint
+WHERE conrelid = 'fk_partitioned_pk'::regclass AND contype = 'f'
+ORDER BY oid::regclass::text;
+ conname | conenforced | convalidated
+------------+-------------+--------------
+ selffk | t | t
+ selffk_1 | t | t
+ selffk_2 | t | t
+ selffk_3 | t | t
+ selffk_3_1 | t | t
+(5 rows)
DROP TABLE fk_notpartitioned_fk, fk_partitioned_pk;
-- Test some other exotic foreign key features: MATCH SIMPLE, ON UPDATE/DELETE
diff --git a/src/test/regress/expected/triggers.out b/src/test/regress/expected/triggers.out
index f245d7f1549..2bf0e77d61e 100644
--- a/src/test/regress/expected/triggers.out
+++ b/src/test/regress/expected/triggers.out
@@ -3535,8 +3535,8 @@ drop table parent, child;
drop function f();
-- Test who runs deferred trigger functions
-- setup
-create role regress_groot;
-create role regress_outis;
+create role regress_caller;
+create role regress_fn_owner;
create function whoami() returns trigger language plpgsql
as $$
begin
@@ -3544,7 +3544,7 @@ begin
return null;
end;
$$;
-alter function whoami() owner to regress_outis;
+alter function whoami() owner to regress_fn_owner;
create table defer_trig (id integer);
grant insert on defer_trig to public;
create constraint trigger whoami after insert on defer_trig
@@ -3553,23 +3553,23 @@ create constraint trigger whoami after insert on defer_trig
execute function whoami();
-- deferred triggers must run as the user that queued the trigger
begin;
-set role regress_groot;
+set role regress_caller;
insert into defer_trig values (1);
reset role;
-set role regress_outis;
+set role regress_fn_owner;
insert into defer_trig values (2);
reset role;
commit;
-NOTICE: I am regress_groot
-NOTICE: I am regress_outis
+NOTICE: I am regress_caller
+NOTICE: I am regress_fn_owner
-- security definer functions override the user who queued the trigger
alter function whoami() security definer;
begin;
-set role regress_groot;
+set role regress_caller;
insert into defer_trig values (3);
reset role;
commit;
-NOTICE: I am regress_outis
+NOTICE: I am regress_fn_owner
alter function whoami() security invoker;
-- make sure the current user is restored after error
create or replace function whoami() returns trigger language plpgsql
@@ -3581,11 +3581,11 @@ begin
end;
$$;
begin;
-set role regress_groot;
+set role regress_caller;
insert into defer_trig values (4);
reset role;
commit; -- error expected
-NOTICE: I am regress_groot
+NOTICE: I am regress_caller
ERROR: division by zero
CONTEXT: SQL statement "SELECT 1 / 0"
PL/pgSQL function whoami() line 4 at PERFORM
@@ -3598,5 +3598,5 @@ select current_user = session_user;
-- clean up
drop table defer_trig;
drop function whoami();
-drop role regress_outis;
-drop role regress_groot;
+drop role regress_fn_owner;
+drop role regress_caller;
diff --git a/src/test/regress/sql/foreign_key.sql b/src/test/regress/sql/foreign_key.sql
index 8159e363022..cfcecb4e911 100644
--- a/src/test/regress/sql/foreign_key.sql
+++ b/src/test/regress/sql/foreign_key.sql
@@ -1389,22 +1389,44 @@ WHERE conrelid::regclass::text like 'fk_partitioned_fk%' ORDER BY oid::regclass:
DROP TABLE fk_partitioned_fk, fk_notpartitioned_pk;
--- NOT VALID foreign key on a non-partitioned table referencing a partitioned table
+-- NOT VALID and NOT ENFORCED foreign key on a non-partitioned table
+-- referencing a partitioned table
CREATE TABLE fk_partitioned_pk (a int, b int, PRIMARY KEY (a, b)) PARTITION BY RANGE (a, b);
CREATE TABLE fk_partitioned_pk_1 PARTITION OF fk_partitioned_pk FOR VALUES FROM (0,0) TO (1000,1000);
+CREATE TABLE fk_partitioned_pk_2 PARTITION OF fk_partitioned_pk FOR VALUES FROM (1000,1000) TO (2000,2000);
CREATE TABLE fk_notpartitioned_fk (b int, a int);
-ALTER TABLE fk_notpartitioned_fk ADD FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT VALID;
-
--- Constraint will be invalid.
-SELECT conname, convalidated FROM pg_constraint
+INSERT INTO fk_partitioned_pk VALUES(100,100), (1000,1000);
+INSERT INTO fk_notpartitioned_fk VALUES(100,100), (1000,1000);
+ALTER TABLE fk_notpartitioned_fk ADD CONSTRAINT fk_notpartitioned_fk_a_b_fkey
+ FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT VALID;
+ALTER TABLE fk_notpartitioned_fk ADD CONSTRAINT fk_notpartitioned_fk_a_b_fkey2
+ FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT ENFORCED;
+
+-- All constraints will be invalid, and _fkey2 constraints will not be enforced.
+SELECT conname, conenforced, convalidated FROM pg_constraint
WHERE conrelid = 'fk_notpartitioned_fk'::regclass ORDER BY oid::regclass::text;
ALTER TABLE fk_notpartitioned_fk VALIDATE CONSTRAINT fk_notpartitioned_fk_a_b_fkey;
+ALTER TABLE fk_notpartitioned_fk ALTER CONSTRAINT fk_notpartitioned_fk_a_b_fkey2 ENFORCED;
--- All constraints are now valid.
-SELECT conname, convalidated FROM pg_constraint
+-- All constraints are now valid and enforced.
+SELECT conname, conenforced, convalidated FROM pg_constraint
WHERE conrelid = 'fk_notpartitioned_fk'::regclass ORDER BY oid::regclass::text;
+-- test a self-referential FK
+ALTER TABLE fk_partitioned_pk ADD CONSTRAINT selffk FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT VALID;
+CREATE TABLE fk_partitioned_pk_3 PARTITION OF fk_partitioned_pk FOR VALUES FROM (2000,2000) TO (3000,3000)
+ PARTITION BY RANGE (a);
+CREATE TABLE fk_partitioned_pk_3_1 PARTITION OF fk_partitioned_pk_3 FOR VALUES FROM (2000) TO (2100);
+SELECT conname, conenforced, convalidated FROM pg_constraint
+WHERE conrelid = 'fk_partitioned_pk'::regclass AND contype = 'f'
+ORDER BY oid::regclass::text;
+ALTER TABLE fk_partitioned_pk_2 VALIDATE CONSTRAINT selffk;
+ALTER TABLE fk_partitioned_pk VALIDATE CONSTRAINT selffk;
+SELECT conname, conenforced, convalidated FROM pg_constraint
+WHERE conrelid = 'fk_partitioned_pk'::regclass AND contype = 'f'
+ORDER BY oid::regclass::text;
+
DROP TABLE fk_notpartitioned_fk, fk_partitioned_pk;
-- Test some other exotic foreign key features: MATCH SIMPLE, ON UPDATE/DELETE
diff --git a/src/test/regress/sql/triggers.sql b/src/test/regress/sql/triggers.sql
index d3d242dd29b..9ffd318385f 100644
--- a/src/test/regress/sql/triggers.sql
+++ b/src/test/regress/sql/triggers.sql
@@ -2701,8 +2701,8 @@ drop function f();
-- Test who runs deferred trigger functions
-- setup
-create role regress_groot;
-create role regress_outis;
+create role regress_caller;
+create role regress_fn_owner;
create function whoami() returns trigger language plpgsql
as $$
begin
@@ -2710,7 +2710,7 @@ begin
return null;
end;
$$;
-alter function whoami() owner to regress_outis;
+alter function whoami() owner to regress_fn_owner;
create table defer_trig (id integer);
grant insert on defer_trig to public;
@@ -2721,10 +2721,10 @@ create constraint trigger whoami after insert on defer_trig
-- deferred triggers must run as the user that queued the trigger
begin;
-set role regress_groot;
+set role regress_caller;
insert into defer_trig values (1);
reset role;
-set role regress_outis;
+set role regress_fn_owner;
insert into defer_trig values (2);
reset role;
commit;
@@ -2732,7 +2732,7 @@ commit;
-- security definer functions override the user who queued the trigger
alter function whoami() security definer;
begin;
-set role regress_groot;
+set role regress_caller;
insert into defer_trig values (3);
reset role;
commit;
@@ -2749,7 +2749,7 @@ end;
$$;
begin;
-set role regress_groot;
+set role regress_caller;
insert into defer_trig values (4);
reset role;
commit; -- error expected
@@ -2758,5 +2758,5 @@ select current_user = session_user;
-- clean up
drop table defer_trig;
drop function whoami();
-drop role regress_outis;
-drop role regress_groot;
+drop role regress_fn_owner;
+drop role regress_caller;