Diffstat (limited to 'src')
-rw-r--r--  src/backend/tcop/postgres.c              | 71
-rw-r--r--  src/include/miscadmin.h                  |  8
-rw-r--r--  src/include/port/atomics.h               |  2
-rw-r--r--  src/include/port/atomics/arch-ia64.h     | 27
-rw-r--r--  src/include/port/atomics/generic-msvc.h  |  2
-rw-r--r--  src/include/storage/s_lock.h             | 65
-rwxr-xr-x  src/tools/pginclude/cpluspluscheck       |  1
-rwxr-xr-x  src/tools/pginclude/headerscheck         |  1
8 files changed, 1 insertion(+), 176 deletions(-)
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index cbb0ec606f9..6f18b688569 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -133,13 +133,6 @@ static long max_stack_depth_bytes = 100 * 1024L;
static char *stack_base_ptr = NULL;
/*
- * On IA64 we also have to remember the register stack base.
- */
-#if defined(__ia64__) || defined(__ia64)
-static char *register_stack_base_ptr = NULL;
-#endif
-
-/*
* Flag to keep track of whether we have started a transaction.
* For extended query protocol this has to be remembered across messages.
*/
@@ -3392,41 +3385,6 @@ ProcessInterrupts(void)
ProcessLogMemoryContextInterrupt();
}
-
-/*
- * IA64-specific code to fetch the AR.BSP register for stack depth checks.
- *
- * We currently support gcc and icc here.
- *
- * Note: while icc accepts gcc asm blocks on x86[_64], this is not true on
- * ia64 (at least not in icc versions before 12.x). So we have to carry a
- * separate implementation for it.
- */
-#if defined(__ia64__) || defined(__ia64)
-
-#if defined(__INTEL_COMPILER)
-/* icc */
-#include <asm/ia64regs.h>
-#define ia64_get_bsp() ((char *) __getReg(_IA64_REG_AR_BSP))
-#else
-/* gcc */
-static __inline__ char *
-ia64_get_bsp(void)
-{
- char *ret;
-
- /* the ;; is a "stop", seems to be required before fetching BSP */
- __asm__ __volatile__(
- ";;\n"
- " mov %0=ar.bsp \n"
-: "=r"(ret));
-
- return ret;
-}
-#endif
-#endif /* IA64 */
-
-
/*
* set_stack_base: set up reference point for stack depth checking
*
@@ -3440,12 +3398,7 @@ set_stack_base(void)
#endif
pg_stack_base_t old;
-#if defined(__ia64__) || defined(__ia64)
- old.stack_base_ptr = stack_base_ptr;
- old.register_stack_base_ptr = register_stack_base_ptr;
-#else
old = stack_base_ptr;
-#endif
/*
* Set up reference point for stack depth checking. On recent gcc we use
@@ -3457,9 +3410,6 @@ set_stack_base(void)
#else
stack_base_ptr = &stack_base;
#endif
-#if defined(__ia64__) || defined(__ia64)
- register_stack_base_ptr = ia64_get_bsp();
-#endif
return old;
}
@@ -3476,12 +3426,7 @@ set_stack_base(void)
void
restore_stack_base(pg_stack_base_t base)
{
-#if defined(__ia64__) || defined(__ia64)
- stack_base_ptr = base.stack_base_ptr;
- register_stack_base_ptr = base.register_stack_base_ptr;
-#else
stack_base_ptr = base;
-#endif
}
/*
@@ -3538,22 +3483,6 @@ stack_is_too_deep(void)
stack_base_ptr != NULL)
return true;
- /*
- * On IA64 there is a separate "register" stack that requires its own
- * independent check. For this, we have to measure the change in the
- * "BSP" pointer from PostgresMain to here. Logic is just as above,
- * except that we know IA64's register stack grows up.
- *
- * Note we assume that the same max_stack_depth applies to both stacks.
- */
-#if defined(__ia64__) || defined(__ia64)
- stack_depth = (long) (ia64_get_bsp() - register_stack_base_ptr);
-
- if (stack_depth > max_stack_depth_bytes &&
- register_stack_base_ptr != NULL)
- return true;
-#endif /* IA64 */
-
return false;
}
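
For context: with the register-stack check removed, stack_is_too_deep()
only has to measure the ordinary call stack against stack_base_ptr. A
minimal standalone sketch of that surviving logic (illustrative names,
not PostgreSQL's actual globals):

    static char *stack_base = NULL;             /* set near the top of main() */
    static long  max_depth_bytes = 100 * 1024L;

    static int
    stack_is_too_deep(void)
    {
        char here;              /* its address approximates current depth */
        long depth = (long) (&here - stack_base);

        if (depth < 0)          /* stack may grow either way; take |depth| */
            depth = -depth;

        return stack_base != NULL && depth > max_depth_bytes;
    }
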
diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h
index 0af130fbc5d..ea9a56d3955 100644
--- a/src/include/miscadmin.h
+++ b/src/include/miscadmin.h
@@ -279,15 +279,7 @@ extern PGDLLIMPORT bool VacuumCostActive;
/* in tcop/postgres.c */
-#if defined(__ia64__) || defined(__ia64)
-typedef struct
-{
- char *stack_base_ptr;
- char *register_stack_base_ptr;
-} pg_stack_base_t;
-#else
typedef char *pg_stack_base_t;
-#endif
extern pg_stack_base_t set_stack_base(void);
extern void restore_stack_base(pg_stack_base_t base);
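
This change collapses pg_stack_base_t to a bare pointer, so the
save/restore pattern around these two functions is unchanged for
callers. A hypothetical caller, for illustration only:

    void
    run_with_new_stack_reference(void (*fn)(void))
    {
        pg_stack_base_t old = set_stack_base();  /* returns the prior base */

        fn();                   /* depth checks now measure from here */
        restore_stack_base(old);
    }
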
diff --git a/src/include/port/atomics.h b/src/include/port/atomics.h
index 33b7fd6042c..f7cd0f6f20c 100644
--- a/src/include/port/atomics.h
+++ b/src/include/port/atomics.h
@@ -68,8 +68,6 @@
#include "port/atomics/arch-arm.h"
#elif defined(__i386__) || defined(__i386) || defined(__x86_64__)
#include "port/atomics/arch-x86.h"
-#elif defined(__ia64__) || defined(__ia64)
-#include "port/atomics/arch-ia64.h"
#elif defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
#include "port/atomics/arch-ppc.h"
#elif defined(__hppa) || defined(__hppa__)
diff --git a/src/include/port/atomics/arch-ia64.h b/src/include/port/atomics/arch-ia64.h
deleted file mode 100644
index 264d2316b97..00000000000
--- a/src/include/port/atomics/arch-ia64.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * arch-ia64.h
- * Atomic operations considerations specific to intel itanium
- *
- * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- * NOTES:
- *
- * src/include/port/atomics/arch-ia64.h
- *
- *-------------------------------------------------------------------------
- */
-
-/*
- * Itanium is weakly ordered, so read and write barriers require a full
- * fence.
- */
-#if defined(__INTEL_COMPILER)
-# define pg_memory_barrier_impl() __mf()
-#elif defined(__GNUC__)
-# define pg_memory_barrier_impl() __asm__ __volatile__ ("mf" : : : "memory")
-#endif
-
-/* per architecture manual doubleword accesses have single copy atomicity */
-#define PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY
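
The deleted header mapped pg_memory_barrier_impl() onto Itanium's "mf"
full-fence instruction. On compilers PostgreSQL still targets, the same
effect is conventionally spelled with a builtin; a sketch (not the
project's actual fallback code):

    /* full fence: no load or store may be reordered across this point */
    #define my_memory_barrier() __atomic_thread_fence(__ATOMIC_SEQ_CST)
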
diff --git a/src/include/port/atomics/generic-msvc.h b/src/include/port/atomics/generic-msvc.h
index 6294162b4cc..1a4adfde686 100644
--- a/src/include/port/atomics/generic-msvc.h
+++ b/src/include/port/atomics/generic-msvc.h
@@ -86,7 +86,7 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
return ret;
}
-/* Only implemented on itanium and 64bit builds */
+/* Only implemented on 64bit builds */
#ifdef _WIN64
#pragma intrinsic(_InterlockedExchangeAdd64)
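
For reference, the 64-bit compare-exchange shown in this hunk's context
is built on MSVC's interlocked intrinsics. A self-contained sketch of
the same pattern (simplified here to a raw volatile field instead of
pg_atomic_uint64):

    #include <intrin.h>
    #include <stdbool.h>

    static bool
    cas_u64(volatile __int64 *ptr, __int64 *expected, __int64 newval)
    {
        /* returns the value that was at *ptr before the operation */
        __int64 seen = _InterlockedCompareExchange64(ptr, newval, *expected);
        bool ok = (seen == *expected);

        if (!ok)
            *expected = seen;   /* report what we observed on failure */
        return ok;
    }
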
diff --git a/src/include/storage/s_lock.h b/src/include/storage/s_lock.h
index 1e1eb324b43..c4a19b2f433 100644
--- a/src/include/storage/s_lock.h
+++ b/src/include/storage/s_lock.h
@@ -250,71 +250,6 @@ spin_delay(void)
#endif /* __x86_64__ */
-#if defined(__ia64__) || defined(__ia64)
-/*
- * Intel Itanium, gcc or Intel's compiler.
- *
- * Itanium has weak memory ordering, but we rely on the compiler to enforce
- * strict ordering of accesses to volatile data. In particular, while the
- * xchg instruction implicitly acts as a memory barrier with 'acquire'
- * semantics, we do not have an explicit memory fence instruction in the
- * S_UNLOCK macro. We use a regular assignment to clear the spinlock, and
- * trust that the compiler marks the generated store instruction with the
- * ".rel" opcode.
- *
- * Testing shows that assumption to hold on gcc, although I could not find
- * any explicit statement on that in the gcc manual. In Intel's compiler,
- * the -m[no-]serialize-volatile option controls that, and testing shows that
- * it is enabled by default.
- *
- * While icc accepts gcc asm blocks on x86[_64], this is not true on ia64
- * (at least not in icc versions before 12.x). So we have to carry a separate
- * compiler-intrinsic-based implementation for it.
- */
-#define HAS_TEST_AND_SET
-
-typedef unsigned int slock_t;
-
-#define TAS(lock) tas(lock)
-
-/* On IA64, it's a win to use a non-locking test before the xchg proper */
-#define TAS_SPIN(lock) (*(lock) ? 1 : TAS(lock))
-
-#ifndef __INTEL_COMPILER
-
-static __inline__ int
-tas(volatile slock_t *lock)
-{
- long int ret;
-
- __asm__ __volatile__(
- " xchg4 %0=%1,%2 \n"
-: "=r"(ret), "+m"(*lock)
-: "r"(1)
-: "memory");
- return (int) ret;
-}
-
-#else /* __INTEL_COMPILER */
-
-static __inline__ int
-tas(volatile slock_t *lock)
-{
- int ret;
-
- ret = _InterlockedExchange(lock,1); /* this is a xchg asm macro */
-
- return ret;
-}
-
-/* icc can't use the regular gcc S_UNLOCK() macro either in this case */
-#define S_UNLOCK(lock) \
- do { __memory_barrier(); *(lock) = 0; } while (0)
-
-#endif /* __INTEL_COMPILER */
-#endif /* __ia64__ || __ia64 */
-
-
/*
* On ARM and ARM64, we use __sync_lock_test_and_set(int *, int) if available.
*
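
The removed ia64 block is the usual test-and-set spinlock shape. The
same shape expressed with GCC builtins instead of xchg4 assembly,
keeping the removed code's names purely for illustration:

    typedef unsigned int slock_t;

    static inline int
    tas(volatile slock_t *lock)
    {
        /* acquire semantics, the role ia64's xchg played implicitly */
        return (int) __atomic_exchange_n(lock, 1, __ATOMIC_ACQUIRE);
    }

    /* non-locking test first, so a contended spin stays read-only */
    #define TAS_SPIN(lock)  (*(lock) ? 1 : tas(lock))

    /* a release store, the job the ".rel" store opcode did on ia64 */
    #define S_UNLOCK(lock)  __atomic_store_n((lock), 0, __ATOMIC_RELEASE)
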
diff --git a/src/tools/pginclude/cpluspluscheck b/src/tools/pginclude/cpluspluscheck
index 7b547261751..3137cddf118 100755
--- a/src/tools/pginclude/cpluspluscheck
+++ b/src/tools/pginclude/cpluspluscheck
@@ -81,7 +81,6 @@ do
# relevant to our platform will be included by atomics.h.
test "$f" = src/include/port/atomics/arch-arm.h && continue
test "$f" = src/include/port/atomics/arch-hppa.h && continue
- test "$f" = src/include/port/atomics/arch-ia64.h && continue
test "$f" = src/include/port/atomics/arch-ppc.h && continue
test "$f" = src/include/port/atomics/arch-x86.h && continue
test "$f" = src/include/port/atomics/fallback.h && continue
diff --git a/src/tools/pginclude/headerscheck b/src/tools/pginclude/headerscheck
index 225224a9bc8..ee4e593263b 100755
--- a/src/tools/pginclude/headerscheck
+++ b/src/tools/pginclude/headerscheck
@@ -77,7 +77,6 @@ do
# relevant to our platform will be included by atomics.h.
test "$f" = src/include/port/atomics/arch-arm.h && continue
test "$f" = src/include/port/atomics/arch-hppa.h && continue
- test "$f" = src/include/port/atomics/arch-ia64.h && continue
test "$f" = src/include/port/atomics/arch-ppc.h && continue
test "$f" = src/include/port/atomics/arch-x86.h && continue
test "$f" = src/include/port/atomics/fallback.h && continue