Diffstat (limited to 'src/backend/storage')

 src/backend/storage/aio/aio.c              |  27
 src/backend/storage/aio/aio_callback.c     |   7
 src/backend/storage/aio/method_io_uring.c  | 218
 src/backend/storage/aio/method_worker.c    |   7
 src/backend/storage/buffer/bufmgr.c        |  10
 src/backend/storage/buffer/localbuf.c      |  23
 src/backend/storage/file/fd.c              |  19
 src/backend/storage/ipc/dsm_registry.c     | 314
 src/backend/storage/ipc/ipci.c             |   3
 src/backend/storage/ipc/procsignal.c       |   3
 src/backend/storage/ipc/shmem.c            |   4
 src/backend/storage/ipc/standby.c          |   4
 src/backend/storage/lmgr/lmgr.c            |   6
 src/backend/storage/lmgr/lock.c            |  22
 src/backend/storage/lmgr/lwlock.c          |   2
 src/backend/storage/lmgr/predicate.c       |  11
 src/backend/storage/lmgr/proc.c            |   1

 17 files changed, 596 insertions(+), 85 deletions(-)
diff --git a/src/backend/storage/aio/aio.c b/src/backend/storage/aio/aio.c
index c64d815ebd1..3643f27ad6e 100644
--- a/src/backend/storage/aio/aio.c
+++ b/src/backend/storage/aio/aio.c
@@ -556,6 +556,13 @@ bool
pgaio_io_was_recycled(PgAioHandle *ioh, uint64 ref_generation, PgAioHandleState *state)
{
*state = ioh->state;
+
+ /*
+ * Ensure that we don't see an earlier state of the handle than ioh->state
+ * due to compiler or CPU reordering. This protects both ->generation as
+ * directly used here, and other fields in the handle accessed in the
+ * caller if the handle was not reused.
+ */
pg_read_barrier();
return ioh->generation != ref_generation;
@@ -752,7 +759,7 @@ pgaio_io_wait_for_free(void)
{
int reclaimed = 0;
- pgaio_debug(DEBUG2, "waiting for free IO with %d pending, %d in-flight, %d idle IOs",
+ pgaio_debug(DEBUG2, "waiting for free IO with %d pending, %u in-flight, %u idle IOs",
pgaio_my_backend->num_staged_ios,
dclist_count(&pgaio_my_backend->in_flight_ios),
dclist_count(&pgaio_my_backend->idle_ios));
@@ -773,7 +780,12 @@ pgaio_io_wait_for_free(void)
* Note that no interrupts are processed between the state check
* and the call to reclaim - that's important as otherwise an
* interrupt could have already reclaimed the handle.
+ *
+ * Need to ensure that there's no reordering; in the more common
+ * paths, where we wait for IO, that's done by
+ * pgaio_io_was_recycled().
*/
+ pg_read_barrier();
pgaio_io_reclaim(ioh);
reclaimed++;
}
@@ -797,7 +809,7 @@ pgaio_io_wait_for_free(void)
if (dclist_count(&pgaio_my_backend->in_flight_ios) == 0)
ereport(ERROR,
errmsg_internal("no free IOs despite no in-flight IOs"),
- errdetail_internal("%d pending, %d in-flight, %d idle IOs",
+ errdetail_internal("%d pending, %u in-flight, %u idle IOs",
pgaio_my_backend->num_staged_ios,
dclist_count(&pgaio_my_backend->in_flight_ios),
dclist_count(&pgaio_my_backend->idle_ios)));
@@ -828,7 +840,7 @@ pgaio_io_wait_for_free(void)
case PGAIO_HS_COMPLETED_IO:
case PGAIO_HS_SUBMITTED:
pgaio_debug_io(DEBUG2, ioh,
- "waiting for free io with %d in flight",
+ "waiting for free io with %u in flight",
dclist_count(&pgaio_my_backend->in_flight_ios));
/*
@@ -852,7 +864,12 @@ pgaio_io_wait_for_free(void)
* check and the call to reclaim - that's important as
* otherwise an interrupt could have already reclaimed the
* handle.
+ *
+ * Need to ensure that there's no reordering; in the more
+ * common paths, where we wait for IO, that's done by
+ * pgaio_io_was_recycled().
*/
+ pg_read_barrier();
pgaio_io_reclaim(ioh);
break;
}
@@ -1252,7 +1269,7 @@ pgaio_closing_fd(int fd)
break;
pgaio_debug_io(DEBUG2, ioh,
- "waiting for IO before FD %d gets closed, %d in-flight IOs",
+ "waiting for IO before FD %d gets closed, %u in-flight IOs",
fd, dclist_count(&pgaio_my_backend->in_flight_ios));
/* see comment in pgaio_io_wait_for_free() about raciness */
@@ -1288,7 +1305,7 @@ pgaio_shutdown(int code, Datum arg)
uint64 generation = ioh->generation;
pgaio_debug_io(DEBUG2, ioh,
- "waiting for IO to complete during shutdown, %d in-flight IOs",
+ "waiting for IO to complete during shutdown, %u in-flight IOs",
dclist_count(&pgaio_my_backend->in_flight_ios));
/* see comment in pgaio_io_wait_for_free() about raciness */
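
For context, the waiter-side pattern these barriers protect looks roughly as follows; a minimal sketch using names from the patch, with the loop structure illustrative rather than lifted from the code:

    /* Snapshot taken when the handle reference was obtained. */
    uint64      ref_generation = ioh->generation;
    PgAioHandleState state;

    /*
     * pgaio_io_was_recycled() loads ioh->state, issues pg_read_barrier(),
     * then compares ioh->generation against the snapshot; the barrier
     * guarantees the state acted on below is not older than the
     * generation check.
     */
    while (!pgaio_io_was_recycled(ioh, ref_generation, &state))
    {
        if (state == PGAIO_HS_COMPLETED_LOCAL)
            break;              /* handle fields are safe to inspect */

        /* ... wait for the IO to advance, then re-check ... */
    }
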
diff --git a/src/backend/storage/aio/aio_callback.c b/src/backend/storage/aio/aio_callback.c
index 0ad9795bb7e..03c9bba0802 100644
--- a/src/backend/storage/aio/aio_callback.c
+++ b/src/backend/storage/aio/aio_callback.c
@@ -256,6 +256,9 @@ pgaio_io_call_complete_shared(PgAioHandle *ioh)
pgaio_result_status_string(result.status),
result.id, result.error_data, result.result);
result = ce->cb->complete_shared(ioh, result, cb_data);
+
+ /* the callback should never transition to unknown */
+ Assert(result.status != PGAIO_RS_UNKNOWN);
}
ioh->distilled_result = result;
@@ -290,6 +293,7 @@ pgaio_io_call_complete_local(PgAioHandle *ioh)
/* start with distilled result from shared callback */
result = ioh->distilled_result;
+ Assert(result.status != PGAIO_RS_UNKNOWN);
for (int i = ioh->num_callbacks; i > 0; i--)
{
@@ -306,6 +310,9 @@ pgaio_io_call_complete_local(PgAioHandle *ioh)
pgaio_result_status_string(result.status),
result.id, result.error_data, result.result);
result = ce->cb->complete_local(ioh, result, cb_data);
+
+ /* the callback should never transition to unknown */
+ Assert(result.status != PGAIO_RS_UNKNOWN);
}
/*
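
The contract these assertions pin down: a completion callback may refine the result it is handed, but must always return a definite status. A hedged sketch of a conforming shared-completion callback (the callback and its refinement logic are hypothetical):

    static PgAioResult
    my_complete_shared(PgAioHandle *ioh, PgAioResult prior_result,
                       uint8 cb_data)
    {
        PgAioResult result = prior_result;

        /* hypothetical refinement: treat a zero-byte result as an error */
        if (result.status == PGAIO_RS_OK && result.result == 0)
            result.status = PGAIO_RS_ERROR;

        /* never hand back an indeterminate status */
        Assert(result.status != PGAIO_RS_UNKNOWN);
        return result;
    }
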
diff --git a/src/backend/storage/aio/method_io_uring.c b/src/backend/storage/aio/method_io_uring.c
index c719ba2727a..0a8c054162f 100644
--- a/src/backend/storage/aio/method_io_uring.c
+++ b/src/backend/storage/aio/method_io_uring.c
@@ -29,6 +29,9 @@
#ifdef IOMETHOD_IO_URING_ENABLED
+#include <sys/mman.h>
+#include <unistd.h>
+
#include <liburing.h>
#include "miscadmin.h"
@@ -94,12 +97,32 @@ PgAioUringContext
struct io_uring io_uring_ring;
} PgAioUringContext;
+/*
+ * Information about the capabilities that io_uring has.
+ *
+ * Depending on the liburing and kernel versions, different features are
+ * supported. At least for the kernel, a version check does not suffice,
+ * as various vendors backport features to older kernels :(.
+ */
+typedef struct PgAioUringCaps
+{
+ bool checked;
+ /* -1 if io_uring_queue_init_mem() is unsupported */
+ int mem_init_size;
+} PgAioUringCaps;
+
+
/* PgAioUringContexts for all backends */
static PgAioUringContext *pgaio_uring_contexts;
/* the current backend's context */
static PgAioUringContext *pgaio_my_uring_context;
+static PgAioUringCaps pgaio_uring_caps =
+{
+ .checked = false,
+ .mem_init_size = -1,
+};
static uint32
pgaio_uring_procs(void)
@@ -111,30 +134,184 @@ pgaio_uring_procs(void)
return MaxBackends + NUM_AUXILIARY_PROCS - MAX_IO_WORKERS;
}
-static Size
+/*
+ * Initializes pgaio_uring_caps, unless that's already done.
+ */
+static void
+pgaio_uring_check_capabilities(void)
+{
+ if (pgaio_uring_caps.checked)
+ return;
+
+ /*
+ * By default io_uring creates a shared memory mapping for each io_uring
+ * instance, leading to a large number of memory mappings. Unfortunately a
+ * large number of memory mappings slows things down; backend exit is
+ * particularly affected. To address that, newer kernels (6.5+) support
+ * using user-provided memory for the ring; by putting the relevant
+ * memory into shared memory we don't need any additional mappings.
+ *
+ * To know whether this is supported, we unfortunately need to probe the
+ * kernel by trying to create a ring with userspace-provided memory. This
+ * also has a secondary benefit: We can determine precisely how much
+ * memory we need for each io_uring instance.
+ */
+#if defined(HAVE_LIBURING_QUEUE_INIT_MEM) && defined(IORING_SETUP_NO_MMAP)
+ {
+ struct io_uring test_ring;
+ size_t ring_size;
+ void *ring_ptr;
+ struct io_uring_params p = {0};
+ int ret;
+
+ /*
+ * Liburing does not yet provide an API to query how much memory a
+ * ring will need, so we over-estimate it here. As the memory is freed
+ * just below, that's a small, temporary waste of memory.
+ *
+ * 1MB is more than enough for rings within io_max_concurrency's
+ * range.
+ */
+ ring_size = 1024 * 1024;
+
+ /*
+ * Hard to believe a system exists where 1MB would not be a multiple
+ * of the page size. But it's cheap to ensure...
+ */
+ ring_size -= ring_size % sysconf(_SC_PAGESIZE);
+
+ ring_ptr = mmap(NULL, ring_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ if (ring_ptr == MAP_FAILED)
+ elog(ERROR,
+ "mmap(%zu) to determine io_uring_queue_init_mem() support failed: %m",
+ ring_size);
+
+ ret = io_uring_queue_init_mem(io_max_concurrency, &test_ring, &p, ring_ptr, ring_size);
+ if (ret > 0)
+ {
+ pgaio_uring_caps.mem_init_size = ret;
+
+ elog(DEBUG1,
+ "can use combined memory mapping for io_uring, each ring needs %d bytes",
+ ret);
+
+ /* clean up the created ring, it was just for a test */
+ io_uring_queue_exit(&test_ring);
+ }
+ else
+ {
+ /*
+ * There are different reasons for ring creation to fail, but it's
+ * ok to treat that just as io_uring_queue_init_mem() not being
+ * supported. We'll report a more detailed error in
+ * pgaio_uring_shmem_init().
+ */
+ errno = -ret;
+ elog(DEBUG1,
+ "cannot use combined memory mapping for io_uring, ring creation failed: %m");
+ }
+
+ if (munmap(ring_ptr, ring_size) != 0)
+ elog(ERROR, "munmap() failed: %m");
+ }
+#else
+ {
+ elog(DEBUG1,
+ "can't use combined memory mapping for io_uring, kernel or liburing too old");
+ }
+#endif
+
+ pgaio_uring_caps.checked = true;
+}
+
+/*
+ * Memory for all PgAioUringContext instances
+ */
+static size_t
pgaio_uring_context_shmem_size(void)
{
return mul_size(pgaio_uring_procs(), sizeof(PgAioUringContext));
}
+/*
+ * Memory for the combined memory used by io_uring instances. Returns 0 if
+ * that is not supported by kernel/liburing.
+ */
+static size_t
+pgaio_uring_ring_shmem_size(void)
+{
+ size_t sz = 0;
+
+ if (pgaio_uring_caps.mem_init_size > 0)
+ {
+ /*
+ * Memory for rings needs to be aligned to a page boundary, so
+ * reserve space for that. Luckily it does not need to be aligned
+ * to hugepage boundaries, even if huge pages are used.
+ */
+ sz = add_size(sz, sysconf(_SC_PAGESIZE));
+ sz = add_size(sz, mul_size(pgaio_uring_procs(),
+ pgaio_uring_caps.mem_init_size));
+ }
+
+ return sz;
+}
+
static size_t
pgaio_uring_shmem_size(void)
{
- return pgaio_uring_context_shmem_size();
+ size_t sz;
+
+ /*
+ * Kernel and liburing support for various features influences how much
+ * shmem we need; perform the necessary checks.
+ */
+ pgaio_uring_check_capabilities();
+
+ sz = pgaio_uring_context_shmem_size();
+ sz = add_size(sz, pgaio_uring_ring_shmem_size());
+
+ return sz;
}
static void
pgaio_uring_shmem_init(bool first_time)
{
- int TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS - MAX_IO_WORKERS;
+ int TotalProcs = pgaio_uring_procs();
bool found;
+ char *shmem;
+ size_t ring_mem_remain = 0;
+ char *ring_mem_next = 0;
- pgaio_uring_contexts = (PgAioUringContext *)
- ShmemInitStruct("AioUring", pgaio_uring_shmem_size(), &found);
-
+ /*
+ * We allocate memory for all PgAioUringContext instances and, if
+ * supported, the memory required for each of the io_uring instances, in
+ * one ShmemInitStruct().
+ */
+ shmem = ShmemInitStruct("AioUringContext", pgaio_uring_shmem_size(), &found);
if (found)
return;
+ pgaio_uring_contexts = (PgAioUringContext *) shmem;
+ shmem += pgaio_uring_context_shmem_size();
+
+ /* if supported, handle memory alignment / sizing for io_uring memory */
+ if (pgaio_uring_caps.mem_init_size > 0)
+ {
+ ring_mem_remain = pgaio_uring_ring_shmem_size();
+ ring_mem_next = (char *) shmem;
+
+ /* align to page boundary, see also pgaio_uring_ring_shmem_size() */
+ ring_mem_next = (char *) TYPEALIGN(sysconf(_SC_PAGESIZE), ring_mem_next);
+
+ /* account for alignment */
+ ring_mem_remain -= ring_mem_next - shmem;
+ shmem += ring_mem_next - shmem;
+
+ shmem += ring_mem_remain;
+ }
+
for (int contextno = 0; contextno < TotalProcs; contextno++)
{
PgAioUringContext *context = &pgaio_uring_contexts[contextno];
@@ -158,7 +335,28 @@ pgaio_uring_shmem_init(bool first_time)
* be worth using that - also need to evaluate if that causes
* noticeable additional contention?
*/
- ret = io_uring_queue_init(io_max_concurrency, &context->io_uring_ring, 0);
+
+ /*
+ * If supported (cf. pgaio_uring_check_capabilities()), create the ring
+ * with its data in shared memory. Otherwise fall back to io_uring
+ * creating a memory mapping for each ring.
+ */
+#if defined(HAVE_LIBURING_QUEUE_INIT_MEM) && defined(IORING_SETUP_NO_MMAP)
+ if (pgaio_uring_caps.mem_init_size > 0)
+ {
+ struct io_uring_params p = {0};
+
+ ret = io_uring_queue_init_mem(io_max_concurrency, &context->io_uring_ring, &p, ring_mem_next, ring_mem_remain);
+
+ ring_mem_remain -= ret;
+ ring_mem_next += ret;
+ }
+ else
+#endif
+ {
+ ret = io_uring_queue_init(io_max_concurrency, &context->io_uring_ring, 0);
+ }
+
if (ret < 0)
{
char *hint = NULL;
@@ -400,9 +598,9 @@ pgaio_uring_wait_one(PgAioHandle *ioh, uint64 ref_generation)
while (true)
{
pgaio_debug_io(DEBUG3, ioh,
- "wait_one io_gen: %llu, ref_gen: %llu, cycle %d",
- (long long unsigned) ioh->generation,
- (long long unsigned) ref_generation,
+ "wait_one io_gen: %" PRIu64 ", ref_gen: %" PRIu64 ", cycle %d",
+ ioh->generation,
+ ref_generation,
waited);
if (pgaio_io_was_recycled(ioh, ref_generation, &state) ||
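
The capability probe above can be reproduced outside the server; a minimal standalone sketch, assuming liburing 2.5+ (which added io_uring_queue_init_mem()), with error handling trimmed:

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <liburing.h>

    int
    main(void)
    {
        struct io_uring ring;
        struct io_uring_params p = {0};
        size_t  sz = 1024 * 1024;
        void   *mem;
        int     ret;

        sz -= sz % sysconf(_SC_PAGESIZE);   /* page-align the probe size */
        mem = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (mem == MAP_FAILED)
            return 1;

        /* > 0: bytes consumed from mem; < 0: kernel/liburing too old */
        ret = io_uring_queue_init_mem(64, &ring, &p, mem, sz);
        if (ret > 0)
        {
            printf("supported, ring needs %d bytes\n", ret);
            io_uring_queue_exit(&ring);
        }
        else
            printf("not supported: %d\n", ret);

        munmap(mem, sz);
        return 0;
    }
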
diff --git a/src/backend/storage/aio/method_worker.c b/src/backend/storage/aio/method_worker.c
index 743cccc2acd..36be179678d 100644
--- a/src/backend/storage/aio/method_worker.c
+++ b/src/backend/storage/aio/method_worker.c
@@ -461,7 +461,12 @@ IoWorkerMain(const void *startup_data, size_t startup_data_len)
int nwakeups = 0;
int worker;
- /* Try to get a job to do. */
+ /*
+ * Try to get a job to do.
+ *
+ * The lwlock acquisition also provides the necessary memory barrier
+ * to ensure that we don't see outdated data in the handle.
+ */
LWLockAcquire(AioWorkerSubmissionQueueLock, LW_EXCLUSIVE);
if ((io_index = pgaio_worker_submission_queue_consume()) == UINT32_MAX)
{
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index f93131a645e..bd68d7e0ca9 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -4550,11 +4550,9 @@ DropRelationBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
if (RelFileLocatorBackendIsTemp(rlocator))
{
if (rlocator.backend == MyProcNumber)
- {
- for (j = 0; j < nforks; j++)
- DropRelationLocalBuffers(rlocator.locator, forkNum[j],
- firstDelBlock[j]);
- }
+ DropRelationLocalBuffers(rlocator.locator, forkNum, nforks,
+ firstDelBlock);
+
return;
}
@@ -7320,7 +7318,7 @@ buffer_readv_report(PgAioResult result, const PgAioTargetData *td,
affected_count > 1 ?
errdetail("Block %u held first zeroed page.",
first + first_off) : 0,
- errhint("See server log for details about the other %u invalid block(s).",
+ errhint("See server log for details about the other %d invalid block(s).",
affected_count + checkfail_count - 1));
return;
}
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index 63101d56a07..3da9c41ee1d 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -629,7 +629,7 @@ InvalidateLocalBuffer(BufferDesc *bufHdr, bool check_unreferenced)
*/
if (check_unreferenced &&
(LocalRefCount[bufid] != 0 || BUF_STATE_GET_REFCOUNT(buf_state) != 0))
- elog(ERROR, "block %u of %s is still referenced (local %u)",
+ elog(ERROR, "block %u of %s is still referenced (local %d)",
bufHdr->tag.blockNum,
relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
MyProcNumber,
@@ -660,10 +660,11 @@ InvalidateLocalBuffer(BufferDesc *bufHdr, bool check_unreferenced)
* See DropRelationBuffers in bufmgr.c for more notes.
*/
void
-DropRelationLocalBuffers(RelFileLocator rlocator, ForkNumber forkNum,
- BlockNumber firstDelBlock)
+DropRelationLocalBuffers(RelFileLocator rlocator, ForkNumber *forkNum,
+ int nforks, BlockNumber *firstDelBlock)
{
int i;
+ int j;
for (i = 0; i < NLocBuffer; i++)
{
@@ -672,12 +673,18 @@ DropRelationLocalBuffers(RelFileLocator rlocator, ForkNumber forkNum,
buf_state = pg_atomic_read_u32(&bufHdr->state);
- if ((buf_state & BM_TAG_VALID) &&
- BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator) &&
- BufTagGetForkNum(&bufHdr->tag) == forkNum &&
- bufHdr->tag.blockNum >= firstDelBlock)
+ if (!(buf_state & BM_TAG_VALID) ||
+ !BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator))
+ continue;
+
+ for (j = 0; j < nforks; j++)
{
- InvalidateLocalBuffer(bufHdr, true);
+ if (BufTagGetForkNum(&bufHdr->tag) == forkNum[j] &&
+ bufHdr->tag.blockNum >= firstDelBlock[j])
+ {
+ InvalidateLocalBuffer(bufHdr, true);
+ break;
+ }
}
}
}
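
A hedged usage sketch of the widened interface, mirroring the bufmgr.c caller above; the fork selection and relation are illustrative:

    /*
     * Drop all local buffers of two forks of a temp relation in a single
     * pass over the local buffer pool (rlocator is the relation's
     * RelFileLocator).
     */
    ForkNumber  forks[] = {FSM_FORKNUM, VISIBILITYMAP_FORKNUM};
    BlockNumber firstDelBlock[] = {0, 0};   /* 0 = drop from the first block */

    DropRelationLocalBuffers(rlocator, forks, lengthof(forks), firstDelBlock);
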
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index 0e8299dd556..a4ec7959f31 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -400,25 +400,22 @@ pg_fsync(int fd)
* portable, even if it runs ok on the current system.
*
* We assert here that a descriptor for a file was opened with write
- * permissions (either O_RDWR or O_WRONLY) and for a directory without
- * write permissions (O_RDONLY).
+ * permissions (i.e., not O_RDONLY) and for a directory without write
+ * permissions (O_RDONLY). Notice that the assertion check is made even
+ * if fsync() is disabled.
*
- * Ignore any fstat errors and let the follow-up fsync() do its work.
- * Doing this sanity check here counts for the case where fsync() is
- * disabled.
+ * If fstat() fails, ignore it and let the follow-up fsync() complain.
*/
if (fstat(fd, &st) == 0)
{
int desc_flags = fcntl(fd, F_GETFL);
- /*
- * O_RDONLY is historically 0, so just make sure that for directories
- * no write flags are used.
- */
+ desc_flags &= O_ACCMODE;
+
if (S_ISDIR(st.st_mode))
- Assert((desc_flags & (O_RDWR | O_WRONLY)) == 0);
+ Assert(desc_flags == O_RDONLY);
else
- Assert((desc_flags & (O_RDWR | O_WRONLY)) != 0);
+ Assert(desc_flags != O_RDONLY);
}
errno = 0;
#endif
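
The simplification works because the open(2) access mode is a two-bit field rather than independent flag bits, with O_RDONLY defined as 0 (which is what the deleted comment was hedging against). A small sketch of what the mask yields:

    int     flags = fcntl(fd, F_GETFL);
    int     mode = flags & O_ACCMODE;   /* one of O_RDONLY, O_WRONLY, O_RDWR */

    if (S_ISDIR(st.st_mode))
        Assert(mode == O_RDONLY);       /* directories: opened read-only */
    else
        Assert(mode != O_RDONLY);       /* files: must be open for writing */
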
diff --git a/src/backend/storage/ipc/dsm_registry.c b/src/backend/storage/ipc/dsm_registry.c
index 1d4fd31ffed..1682cc6d34c 100644
--- a/src/backend/storage/ipc/dsm_registry.c
+++ b/src/backend/storage/ipc/dsm_registry.c
@@ -15,6 +15,20 @@
* current backend. This function guarantees that only one backend
* initializes the segment and that all other backends just attach it.
*
+ * A DSA can be created in or retrieved from the registry by calling
+ * GetNamedDSA(). As with GetNamedDSMSegment(), if a DSA with the provided
+ * name does not yet exist, it is created. Otherwise, GetNamedDSA()
+ * ensures the DSA is attached to the current backend. This function
+ * guarantees that only one backend initializes the DSA and that all other
+ * backends just attach it.
+ *
+ * A dshash table can be created in or retrieved from the registry by
+ * calling GetNamedDSHash(). As with GetNamedDSMSegment(), if a hash
+ * table with the provided name does not yet exist, it is created.
+ * Otherwise, GetNamedDSHash() ensures the hash table is attached to the
+ * current backend. This function guarantees that only one backend
+ * initializes the table and that all other backends just attach it.
+ *
* Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
@@ -26,12 +40,20 @@
#include "postgres.h"
+#include "funcapi.h"
#include "lib/dshash.h"
#include "storage/dsm_registry.h"
#include "storage/lwlock.h"
#include "storage/shmem.h"
+#include "utils/builtins.h"
#include "utils/memutils.h"
+#define DSMR_NAME_LEN 128
+
+#define DSMR_DSA_TRANCHE_SUFFIX " DSA"
+#define DSMR_DSA_TRANCHE_SUFFIX_LEN (sizeof(DSMR_DSA_TRANCHE_SUFFIX) - 1)
+#define DSMR_DSA_TRANCHE_NAME_LEN (DSMR_NAME_LEN + DSMR_DSA_TRANCHE_SUFFIX_LEN)
+
typedef struct DSMRegistryCtxStruct
{
dsa_handle dsah;
@@ -40,15 +62,55 @@ typedef struct DSMRegistryCtxStruct
static DSMRegistryCtxStruct *DSMRegistryCtx;
-typedef struct DSMRegistryEntry
+typedef struct NamedDSMState
{
- char name[64];
dsm_handle handle;
size_t size;
+} NamedDSMState;
+
+typedef struct NamedDSAState
+{
+ dsa_handle handle;
+ int tranche;
+ char tranche_name[DSMR_DSA_TRANCHE_NAME_LEN];
+} NamedDSAState;
+
+typedef struct NamedDSHState
+{
+ NamedDSAState dsa;
+ dshash_table_handle handle;
+ int tranche;
+ char tranche_name[DSMR_NAME_LEN];
+} NamedDSHState;
+
+typedef enum DSMREntryType
+{
+ DSMR_ENTRY_TYPE_DSM,
+ DSMR_ENTRY_TYPE_DSA,
+ DSMR_ENTRY_TYPE_DSH,
+} DSMREntryType;
+
+static const char *const DSMREntryTypeNames[] =
+{
+ [DSMR_ENTRY_TYPE_DSM] = "segment",
+ [DSMR_ENTRY_TYPE_DSA] = "area",
+ [DSMR_ENTRY_TYPE_DSH] = "hash",
+};
+
+typedef struct DSMRegistryEntry
+{
+ char name[DSMR_NAME_LEN];
+ DSMREntryType type;
+ union
+ {
+ NamedDSMState dsm;
+ NamedDSAState dsa;
+ NamedDSHState dsh;
+ } data;
} DSMRegistryEntry;
static const dshash_parameters dsh_params = {
- offsetof(DSMRegistryEntry, handle),
+ offsetof(DSMRegistryEntry, type),
sizeof(DSMRegistryEntry),
dshash_strcmp,
dshash_strhash,
@@ -141,7 +203,7 @@ GetNamedDSMSegment(const char *name, size_t size,
ereport(ERROR,
(errmsg("DSM segment name cannot be empty")));
- if (strlen(name) >= offsetof(DSMRegistryEntry, handle))
+ if (strlen(name) >= offsetof(DSMRegistryEntry, type))
ereport(ERROR,
(errmsg("DSM segment name too long")));
@@ -158,32 +220,39 @@ GetNamedDSMSegment(const char *name, size_t size,
entry = dshash_find_or_insert(dsm_registry_table, name, found);
if (!(*found))
{
+ NamedDSMState *state = &entry->data.dsm;
+ dsm_segment *seg;
+
+ entry->type = DSMR_ENTRY_TYPE_DSM;
+
/* Initialize the segment. */
- dsm_segment *seg = dsm_create(size, 0);
+ seg = dsm_create(size, 0);
dsm_pin_segment(seg);
dsm_pin_mapping(seg);
- entry->handle = dsm_segment_handle(seg);
- entry->size = size;
+ state->handle = dsm_segment_handle(seg);
+ state->size = size;
ret = dsm_segment_address(seg);
if (init_callback)
(*init_callback) (ret);
}
- else if (entry->size != size)
- {
+ else if (entry->type != DSMR_ENTRY_TYPE_DSM)
ereport(ERROR,
- (errmsg("requested DSM segment size does not match size of "
- "existing segment")));
- }
+ (errmsg("requested DSM segment does not match type of existing entry")));
+ else if (entry->data.dsm.size != size)
+ ereport(ERROR,
+ (errmsg("requested DSM segment size does not match size of existing segment")));
else
{
- dsm_segment *seg = dsm_find_mapping(entry->handle);
+ NamedDSMState *state = &entry->data.dsm;
+ dsm_segment *seg;
/* If the existing segment is not already attached, attach it now. */
+ seg = dsm_find_mapping(state->handle);
if (seg == NULL)
{
- seg = dsm_attach(entry->handle);
+ seg = dsm_attach(state->handle);
if (seg == NULL)
elog(ERROR, "could not map dynamic shared memory segment");
@@ -198,3 +267,220 @@ GetNamedDSMSegment(const char *name, size_t size,
return ret;
}
+
+/*
+ * Initialize or attach a named DSA.
+ *
+ * This routine returns a pointer to the DSA. A new LWLock tranche ID will be
+ * generated if needed. Note that the lock tranche will be registered with the
+ * provided name. Also note that this should be called at most once for a
+ * given DSA in each backend.
+ */
+dsa_area *
+GetNamedDSA(const char *name, bool *found)
+{
+ DSMRegistryEntry *entry;
+ MemoryContext oldcontext;
+ dsa_area *ret;
+
+ Assert(found);
+
+ if (!name || *name == '\0')
+ ereport(ERROR,
+ (errmsg("DSA name cannot be empty")));
+
+ if (strlen(name) >= offsetof(DSMRegistryEntry, type))
+ ereport(ERROR,
+ (errmsg("DSA name too long")));
+
+ /* Be sure any local memory allocated by DSM/DSA routines is persistent. */
+ oldcontext = MemoryContextSwitchTo(TopMemoryContext);
+
+ /* Connect to the registry. */
+ init_dsm_registry();
+
+ entry = dshash_find_or_insert(dsm_registry_table, name, found);
+ if (!(*found))
+ {
+ NamedDSAState *state = &entry->data.dsa;
+
+ entry->type = DSMR_ENTRY_TYPE_DSA;
+
+ /* Initialize the LWLock tranche for the DSA. */
+ state->tranche = LWLockNewTrancheId();
+ strcpy(state->tranche_name, name);
+ LWLockRegisterTranche(state->tranche, state->tranche_name);
+
+ /* Initialize the DSA. */
+ ret = dsa_create(state->tranche);
+ dsa_pin(ret);
+ dsa_pin_mapping(ret);
+
+ /* Store handle for other backends to use. */
+ state->handle = dsa_get_handle(ret);
+ }
+ else if (entry->type != DSMR_ENTRY_TYPE_DSA)
+ ereport(ERROR,
+ (errmsg("requested DSA does not match type of existing entry")));
+ else
+ {
+ NamedDSAState *state = &entry->data.dsa;
+
+ if (dsa_is_attached(state->handle))
+ ereport(ERROR,
+ (errmsg("requested DSA already attached to current process")));
+
+ /* Initialize existing LWLock tranche for the DSA. */
+ LWLockRegisterTranche(state->tranche, state->tranche_name);
+
+ /* Attach to existing DSA. */
+ ret = dsa_attach(state->handle);
+ dsa_pin_mapping(ret);
+ }
+
+ dshash_release_lock(dsm_registry_table, entry);
+ MemoryContextSwitchTo(oldcontext);
+
+ return ret;
+}
+
+/*
+ * Initialize or attach a named dshash table.
+ *
+ * This routine returns the address of the table. The tranche_id member of
+ * params is ignored; new tranche IDs will be generated if needed. Note that
+ * the DSA lock tranche will be registered with the provided name with " DSA"
+ * appended. The dshash lock tranche will be registered with the provided
+ * name. Also note that this should be called at most once for a given table
+ * in each backend.
+ */
+dshash_table *
+GetNamedDSHash(const char *name, const dshash_parameters *params, bool *found)
+{
+ DSMRegistryEntry *entry;
+ MemoryContext oldcontext;
+ dshash_table *ret;
+
+ Assert(params);
+ Assert(found);
+
+ if (!name || *name == '\0')
+ ereport(ERROR,
+ (errmsg("DSHash name cannot be empty")));
+
+ if (strlen(name) >= offsetof(DSMRegistryEntry, type))
+ ereport(ERROR,
+ (errmsg("DSHash name too long")));
+
+ /* Be sure any local memory allocated by DSM/DSA routines is persistent. */
+ oldcontext = MemoryContextSwitchTo(TopMemoryContext);
+
+ /* Connect to the registry. */
+ init_dsm_registry();
+
+ entry = dshash_find_or_insert(dsm_registry_table, name, found);
+ if (!(*found))
+ {
+ NamedDSAState *dsa_state = &entry->data.dsh.dsa;
+ NamedDSHState *dsh_state = &entry->data.dsh;
+ dshash_parameters params_copy;
+ dsa_area *dsa;
+
+ entry->type = DSMR_ENTRY_TYPE_DSH;
+
+ /* Initialize the LWLock tranche for the DSA. */
+ dsa_state->tranche = LWLockNewTrancheId();
+ sprintf(dsa_state->tranche_name, "%s%s", name, DSMR_DSA_TRANCHE_SUFFIX);
+ LWLockRegisterTranche(dsa_state->tranche, dsa_state->tranche_name);
+
+ /* Initialize the LWLock tranche for the dshash table. */
+ dsh_state->tranche = LWLockNewTrancheId();
+ strcpy(dsh_state->tranche_name, name);
+ LWLockRegisterTranche(dsh_state->tranche, dsh_state->tranche_name);
+
+ /* Initialize the DSA for the hash table. */
+ dsa = dsa_create(dsa_state->tranche);
+ dsa_pin(dsa);
+ dsa_pin_mapping(dsa);
+
+ /* Initialize the dshash table. */
+ memcpy(&params_copy, params, sizeof(dshash_parameters));
+ params_copy.tranche_id = dsh_state->tranche;
+ ret = dshash_create(dsa, &params_copy, NULL);
+
+ /* Store handles for other backends to use. */
+ dsa_state->handle = dsa_get_handle(dsa);
+ dsh_state->handle = dshash_get_hash_table_handle(ret);
+ }
+ else if (entry->type != DSMR_ENTRY_TYPE_DSH)
+ ereport(ERROR,
+ (errmsg("requested DSHash does not match type of existing entry")));
+ else
+ {
+ NamedDSAState *dsa_state = &entry->data.dsh.dsa;
+ NamedDSHState *dsh_state = &entry->data.dsh;
+ dsa_area *dsa;
+
+ /* XXX: Should we verify that params matches what the table was created with? */
+
+ if (dsa_is_attached(dsa_state->handle))
+ ereport(ERROR,
+ (errmsg("requested DSHash already attached to current process")));
+
+ /* Initialize existing LWLock tranches for the DSA and dshash table. */
+ LWLockRegisterTranche(dsa_state->tranche, dsa_state->tranche_name);
+ LWLockRegisterTranche(dsh_state->tranche, dsh_state->tranche_name);
+
+ /* Attach to existing DSA for the hash table. */
+ dsa = dsa_attach(dsa_state->handle);
+ dsa_pin_mapping(dsa);
+
+ /* Attach to existing dshash table. */
+ ret = dshash_attach(dsa, params, dsh_state->handle, NULL);
+ }
+
+ dshash_release_lock(dsm_registry_table, entry);
+ MemoryContextSwitchTo(oldcontext);
+
+ return ret;
+}
+
+Datum
+pg_get_dsm_registry_allocations(PG_FUNCTION_ARGS)
+{
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ DSMRegistryEntry *entry;
+ MemoryContext oldcontext;
+ dshash_seq_status status;
+
+ InitMaterializedSRF(fcinfo, MAT_SRF_USE_EXPECTED_DESC);
+
+ /* Be sure any local memory allocated by DSM/DSA routines is persistent. */
+ oldcontext = MemoryContextSwitchTo(TopMemoryContext);
+ init_dsm_registry();
+ MemoryContextSwitchTo(oldcontext);
+
+ dshash_seq_init(&status, dsm_registry_table, false);
+ while ((entry = dshash_seq_next(&status)) != NULL)
+ {
+ Datum vals[3];
+ bool nulls[3] = {0};
+
+ vals[0] = CStringGetTextDatum(entry->name);
+ vals[1] = CStringGetTextDatum(DSMREntryTypeNames[entry->type]);
+
+ /*
+ * Since we can't know the size of DSA/dshash entries without first
+ * attaching to them, return NULL for those.
+ */
+ if (entry->type == DSMR_ENTRY_TYPE_DSM)
+ vals[2] = Int64GetDatum(entry->data.dsm.size);
+ else
+ nulls[2] = true;
+
+ tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, vals, nulls);
+ }
+ dshash_seq_term(&status);
+
+ return (Datum) 0;
+}
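
For illustration, a hedged sketch of how an extension might use the new registry entry points (pg_get_dsm_registry_allocations() itself backs a SQL-level view); the extension name, entry layout, and attach function are hypothetical:

    #include "postgres.h"

    #include "lib/dshash.h"
    #include "storage/dsm_registry.h"

    /* hypothetical dshash entry: NUL-terminated key plus a counter */
    typedef struct MyEntry
    {
        char    key[64];
        int64   count;
    } MyEntry;

    static const dshash_parameters my_params = {
        offsetof(MyEntry, count),   /* key size */
        sizeof(MyEntry),
        dshash_strcmp,
        dshash_strhash,
        dshash_strcpy,
        0,                          /* tranche_id: ignored by GetNamedDSHash */
    };

    static dsa_area *my_dsa;
    static dshash_table *my_hash;

    static void
    my_extension_attach(void)
    {
        bool    found;

        /* created once cluster-wide on first call, attached thereafter */
        my_dsa = GetNamedDSA("my_extension", &found);
        my_hash = GetNamedDSHash("my_extension hash", &my_params, &found);
    }
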
diff --git a/src/backend/storage/ipc/ipci.c b/src/backend/storage/ipc/ipci.c
index 00c76d05356..2fa045e6b0f 100644
--- a/src/backend/storage/ipc/ipci.c
+++ b/src/backend/storage/ipc/ipci.c
@@ -51,7 +51,6 @@
#include "storage/sinvaladt.h"
#include "utils/guc.h"
#include "utils/injection_point.h"
-#include "utils/memutils.h"
/* GUCs */
int shared_memory_type = DEFAULT_SHARED_MEMORY_TYPE;
@@ -151,7 +150,6 @@ CalculateShmemSize(int *num_semaphores)
size = add_size(size, InjectionPointShmemSize());
size = add_size(size, SlotSyncShmemSize());
size = add_size(size, AioShmemSize());
- size = add_size(size, MemoryContextReportingShmemSize());
/* include additional requested shmem from preload libraries */
size = add_size(size, total_addin_request);
@@ -345,7 +343,6 @@ CreateOrAttachShmemStructs(void)
WaitEventCustomShmemInit();
InjectionPointShmemInit();
AioShmemInit();
- MemoryContextReportingShmemInit();
}
/*
diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c
index ce69e26d720..a9bb540b55a 100644
--- a/src/backend/storage/ipc/procsignal.c
+++ b/src/backend/storage/ipc/procsignal.c
@@ -691,9 +691,6 @@ procsignal_sigusr1_handler(SIGNAL_ARGS)
if (CheckProcSignal(PROCSIG_LOG_MEMORY_CONTEXT))
HandleLogMemoryContextInterrupt();
- if (CheckProcSignal(PROCSIG_GET_MEMORY_CONTEXT))
- HandleGetMemoryContextInterrupt();
-
if (CheckProcSignal(PROCSIG_PARALLEL_APPLY_MESSAGE))
HandleParallelApplyMessageInterrupt();
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index c9ae3b45b76..ca3656fc76f 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -679,12 +679,10 @@ pg_get_shmem_allocations_numa(PG_FUNCTION_ARGS)
*/
for (i = 0; i < shm_ent_page_count; i++)
{
- volatile uint64 touch pg_attribute_unused();
-
page_ptrs[i] = startptr + (i * os_page_size);
if (firstNumaTouch)
- pg_numa_touch_mem_if_required(touch, page_ptrs[i]);
+ pg_numa_touch_mem_if_required(page_ptrs[i]);
CHECK_FOR_INTERRUPTS();
}
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index 7fa8d9247e0..4222bdab078 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -1376,7 +1376,7 @@ LogCurrentRunningXacts(RunningTransactions CurrRunningXacts)
if (xlrec.subxid_overflow)
elog(DEBUG2,
- "snapshot of %d running transactions overflowed (lsn %X/%X oldest xid %u latest complete %u next xid %u)",
+ "snapshot of %d running transactions overflowed (lsn %X/%08X oldest xid %u latest complete %u next xid %u)",
CurrRunningXacts->xcnt,
LSN_FORMAT_ARGS(recptr),
CurrRunningXacts->oldestRunningXid,
@@ -1384,7 +1384,7 @@ LogCurrentRunningXacts(RunningTransactions CurrRunningXacts)
CurrRunningXacts->nextXid);
else
elog(DEBUG2,
- "snapshot of %d+%d running transaction ids (lsn %X/%X oldest xid %u latest complete %u next xid %u)",
+ "snapshot of %d+%d running transaction ids (lsn %X/%08X oldest xid %u latest complete %u next xid %u)",
CurrRunningXacts->xcnt, CurrRunningXacts->subxcnt,
LSN_FORMAT_ARGS(recptr),
CurrRunningXacts->oldestRunningXid,
diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c
index f50962983c3..3f6bf70bd3c 100644
--- a/src/backend/storage/lmgr/lmgr.c
+++ b/src/backend/storage/lmgr/lmgr.c
@@ -717,7 +717,10 @@ XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid,
* through, to avoid slowing down the normal case.)
*/
if (!first)
+ {
+ CHECK_FOR_INTERRUPTS();
pg_usleep(1000L);
+ }
first = false;
xid = SubTransGetTopmostTransaction(xid);
}
@@ -757,7 +760,10 @@ ConditionalXactLockTableWait(TransactionId xid, bool logLockFailure)
/* See XactLockTableWait about this case */
if (!first)
+ {
+ CHECK_FOR_INTERRUPTS();
pg_usleep(1000L);
+ }
first = false;
xid = SubTransGetTopmostTransaction(xid);
}
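
The shape of the fix in isolation: a retry loop that sleeps must poll for interrupts, otherwise a backend stuck walking a long subtransaction chain cannot be cancelled. A minimal sketch (the exit test is a stand-in):

    for (;;)
    {
        if (done())                 /* stand-in for the real exit condition */
            break;
        CHECK_FOR_INTERRUPTS();     /* allow query cancel / termination */
        pg_usleep(1000L);           /* 1 ms, matching the loops above */
    }
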
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 86b06b9223f..62f3471448e 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -51,7 +51,7 @@
/* GUC variables */
int max_locks_per_xact; /* used to set the lock table size */
-bool log_lock_failure = false;
+bool log_lock_failures = false;
#define NLOCKENTS() \
mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
@@ -3539,9 +3539,9 @@ AtPrepare_Locks(void)
* but that probably costs more cycles.
*/
void
-PostPrepare_Locks(TransactionId xid)
+PostPrepare_Locks(FullTransactionId fxid)
{
- PGPROC *newproc = TwoPhaseGetDummyProc(xid, false);
+ PGPROC *newproc = TwoPhaseGetDummyProc(fxid, false);
HASH_SEQ_STATUS status;
LOCALLOCK *locallock;
LOCK *lock;
@@ -4324,11 +4324,11 @@ DumpAllLocks(void)
* and PANIC anyway.
*/
void
-lock_twophase_recover(TransactionId xid, uint16 info,
+lock_twophase_recover(FullTransactionId fxid, uint16 info,
void *recdata, uint32 len)
{
TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
- PGPROC *proc = TwoPhaseGetDummyProc(xid, false);
+ PGPROC *proc = TwoPhaseGetDummyProc(fxid, false);
LOCKTAG *locktag;
LOCKMODE lockmode;
LOCKMETHODID lockmethodid;
@@ -4505,7 +4505,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
* starting up into hot standby mode.
*/
void
-lock_twophase_standby_recover(TransactionId xid, uint16 info,
+lock_twophase_standby_recover(FullTransactionId fxid, uint16 info,
void *recdata, uint32 len)
{
TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
@@ -4524,7 +4524,7 @@ lock_twophase_standby_recover(TransactionId xid, uint16 info,
if (lockmode == AccessExclusiveLock &&
locktag->locktag_type == LOCKTAG_RELATION)
{
- StandbyAcquireAccessExclusiveLock(xid,
+ StandbyAcquireAccessExclusiveLock(XidFromFullTransactionId(fxid),
locktag->locktag_field1 /* dboid */ ,
locktag->locktag_field2 /* reloid */ );
}
@@ -4537,11 +4537,11 @@ lock_twophase_standby_recover(TransactionId xid, uint16 info,
* Find and release the lock indicated by the 2PC record.
*/
void
-lock_twophase_postcommit(TransactionId xid, uint16 info,
+lock_twophase_postcommit(FullTransactionId fxid, uint16 info,
void *recdata, uint32 len)
{
TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
- PGPROC *proc = TwoPhaseGetDummyProc(xid, true);
+ PGPROC *proc = TwoPhaseGetDummyProc(fxid, true);
LOCKTAG *locktag;
LOCKMETHODID lockmethodid;
LockMethod lockMethodTable;
@@ -4563,10 +4563,10 @@ lock_twophase_postcommit(TransactionId xid, uint16 info,
* This is actually just the same as the COMMIT case.
*/
void
-lock_twophase_postabort(TransactionId xid, uint16 info,
+lock_twophase_postabort(FullTransactionId fxid, uint16 info,
void *recdata, uint32 len)
{
- lock_twophase_postcommit(xid, info, recdata, len);
+ lock_twophase_postcommit(fxid, info, recdata, len);
}
/*
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 5148ef982e3..46f44bc4511 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -178,8 +178,6 @@ static const char *const BuiltinTrancheNames[] = {
[LWTRANCHE_XACT_SLRU] = "XactSLRU",
[LWTRANCHE_PARALLEL_VACUUM_DSA] = "ParallelVacuumDSA",
[LWTRANCHE_AIO_URING_COMPLETION] = "AioUringCompletion",
- [LWTRANCHE_MEMORY_CONTEXT_REPORTING_STATE] = "MemoryContextReportingState",
- [LWTRANCHE_MEMORY_CONTEXT_REPORTING_PROC] = "MemoryContextReportingPerProcess",
};
StaticAssertDecl(lengthof(BuiltinTrancheNames) ==
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index d82114ffca1..c07fb588355 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -191,7 +191,7 @@
* AtPrepare_PredicateLocks(void);
* PostPrepare_PredicateLocks(TransactionId xid);
* PredicateLockTwoPhaseFinish(TransactionId xid, bool isCommit);
- * predicatelock_twophase_recover(TransactionId xid, uint16 info,
+ * predicatelock_twophase_recover(FullTransactionId fxid, uint16 info,
* void *recdata, uint32 len);
*/
@@ -4856,7 +4856,7 @@ AtPrepare_PredicateLocks(void)
* anyway. We only need to clean up our local state.
*/
void
-PostPrepare_PredicateLocks(TransactionId xid)
+PostPrepare_PredicateLocks(FullTransactionId fxid)
{
if (MySerializableXact == InvalidSerializableXact)
return;
@@ -4879,12 +4879,12 @@ PostPrepare_PredicateLocks(TransactionId xid)
* commits or aborts.
*/
void
-PredicateLockTwoPhaseFinish(TransactionId xid, bool isCommit)
+PredicateLockTwoPhaseFinish(FullTransactionId fxid, bool isCommit)
{
SERIALIZABLEXID *sxid;
SERIALIZABLEXIDTAG sxidtag;
- sxidtag.xid = xid;
+ sxidtag.xid = XidFromFullTransactionId(fxid);
LWLockAcquire(SerializableXactHashLock, LW_SHARED);
sxid = (SERIALIZABLEXID *)
@@ -4906,10 +4906,11 @@ PredicateLockTwoPhaseFinish(TransactionId xid, bool isCommit)
* Re-acquire a predicate lock belonging to a transaction that was prepared.
*/
void
-predicatelock_twophase_recover(TransactionId xid, uint16 info,
+predicatelock_twophase_recover(FullTransactionId fxid, uint16 info,
void *recdata, uint32 len)
{
TwoPhasePredicateRecord *record;
+ TransactionId xid = XidFromFullTransactionId(fxid);
Assert(len == sizeof(TwoPhasePredicateRecord));
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index f194e6b3dcc..e9ef0fbfe32 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -50,7 +50,6 @@
#include "storage/procsignal.h"
#include "storage/spin.h"
#include "storage/standby.h"
-#include "utils/memutils.h"
#include "utils/timeout.h"
#include "utils/timestamp.h"