<para>
In general, <productname>PostgreSQL</productname> can be expected to work on
- these CPU architectures: x86, x86_64, IA64, PowerPC,
+ these CPU architectures: x86, x86_64, PowerPC,
PowerPC 64, S/390, S/390x, Sparc, Sparc 64, ARM, MIPS, MIPSEL,
and PA-RISC. Code support exists for M68K, M32R, and VAX, but these
architectures are not known to have been tested recently. It is often
*/
static char *stack_base_ptr = NULL;
-/*
- * On IA64 we also have to remember the register stack base.
- */
-#if defined(__ia64__) || defined(__ia64)
-static char *register_stack_base_ptr = NULL;
-#endif
-
/*
* Flag to keep track of whether we have started a transaction.
* For extended query protocol this has to be remembered across messages.
ProcessLogMemoryContextInterrupt();
}
-
-/*
- * IA64-specific code to fetch the AR.BSP register for stack depth checks.
- *
- * We currently support gcc and icc here.
- *
- * Note: while icc accepts gcc asm blocks on x86[_64], this is not true on
- * ia64 (at least not in icc versions before 12.x). So we have to carry a
- * separate implementation for it.
- */
-#if defined(__ia64__) || defined(__ia64)
-
-#if defined(__INTEL_COMPILER)
-/* icc */
-#include <asm/ia64regs.h>
-#define ia64_get_bsp() ((char *) __getReg(_IA64_REG_AR_BSP))
-#else
-/* gcc */
-static __inline__ char *
-ia64_get_bsp(void)
-{
- char *ret;
-
- /* the ;; is a "stop", seems to be required before fetching BSP */
- __asm__ __volatile__(
- ";;\n"
- " mov %0=ar.bsp \n"
-: "=r"(ret));
-
- return ret;
-}
-#endif
-#endif /* IA64 */
-
-
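For readers unfamiliar with the idiom being deleted above: ia64_get_bsp() read a machine register through a gcc asm block. A hedged sketch of the same register-read pattern on x86-64 (hypothetical helper, not part of this patch):

    static __inline__ char *
    x86_64_get_sp(void)
    {
        char *ret;

        /* copy the stack pointer register into an ordinary C variable */
        __asm__ __volatile__(" movq %%rsp, %0 \n" : "=r"(ret));

        return ret;
    }
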
/*
* set_stack_base: set up reference point for stack depth checking
*
#endif
pg_stack_base_t old;
-#if defined(__ia64__) || defined(__ia64)
- old.stack_base_ptr = stack_base_ptr;
- old.register_stack_base_ptr = register_stack_base_ptr;
-#else
old = stack_base_ptr;
-#endif
/*
* Set up reference point for stack depth checking. On recent gcc we use
#else
stack_base_ptr = &stack_base;
#endif
-#if defined(__ia64__) || defined(__ia64)
- register_stack_base_ptr = ia64_get_bsp();
-#endif
return old;
}
void
restore_stack_base(pg_stack_base_t base)
{
-#if defined(__ia64__) || defined(__ia64)
- stack_base_ptr = base.stack_base_ptr;
- register_stack_base_ptr = base.register_stack_base_ptr;
-#else
stack_base_ptr = base;
-#endif
}
/*
stack_base_ptr != NULL)
return true;
- /*
- * On IA64 there is a separate "register" stack that requires its own
- * independent check. For this, we have to measure the change in the
- * "BSP" pointer from PostgresMain to here. Logic is just as above,
- * except that we know IA64's register stack grows up.
- *
- * Note we assume that the same max_stack_depth applies to both stacks.
- */
-#if defined(__ia64__) || defined(__ia64)
- stack_depth = (long) (ia64_get_bsp() - register_stack_base_ptr);
-
- if (stack_depth > max_stack_depth_bytes &&
- register_stack_base_ptr != NULL)
- return true;
-#endif /* IA64 */
-
return false;
}
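With the register-stack branch gone, stack_is_too_deep() reduces to a single linear measurement against the reference point captured by set_stack_base(). A self-contained sketch of the surviving logic (illustrative globals; the real limit comes from the max_stack_depth GUC):

    #include <stdbool.h>

    static char *stack_base_ptr = NULL;                 /* set by set_stack_base() */
    static long  max_stack_depth_bytes = 2048 * 1024L;  /* illustrative 2MB limit */

    static bool
    stack_is_too_deep_sketch(void)
    {
        char  stack_top_loc;    /* address of a local approximates current depth */
        long  stack_depth = (long) (stack_base_ptr - &stack_top_loc);

        /* the C stack may grow either way, so compare the absolute distance */
        if (stack_depth < 0)
            stack_depth = -stack_depth;

        return (stack_depth > max_stack_depth_bytes &&
                stack_base_ptr != NULL);
    }
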
/* in tcop/postgres.c */
-#if defined(__ia64__) || defined(__ia64)
-typedef struct
-{
- char *stack_base_ptr;
- char *register_stack_base_ptr;
-} pg_stack_base_t;
-#else
typedef char *pg_stack_base_t;
-#endif
extern pg_stack_base_t set_stack_base(void);
extern void restore_stack_base(pg_stack_base_t base);
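The simplified typedef keeps the save/restore API identical for callers; only the struct case disappears. A hypothetical call site, e.g. an embedding that enters backend code on a different thread stack:

    void
    run_backend_code_on_foreign_stack(void)       /* hypothetical caller */
    {
        pg_stack_base_t saved = set_stack_base(); /* re-anchor depth checks here */

        /* ... execute code that may recurse deeply on this stack ... */

        restore_stack_base(saved);                /* put the old reference back */
    }
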
#include "port/atomics/arch-arm.h"
#elif defined(__i386__) || defined(__i386) || defined(__x86_64__)
#include "port/atomics/arch-x86.h"
-#elif defined(__ia64__) || defined(__ia64)
-#include "port/atomics/arch-ia64.h"
#elif defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
#include "port/atomics/arch-ppc.h"
#elif defined(__hppa) || defined(__hppa__)
--- a/src/include/port/atomics/arch-ia64.h
+++ /dev/null
-/*-------------------------------------------------------------------------
- *
- * arch-ia64.h
- * Atomic operations considerations specific to intel itanium
- *
- * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- * NOTES:
- *
- * src/include/port/atomics/arch-ia64.h
- *
- *-------------------------------------------------------------------------
- */
-
-/*
- * Itanium is weakly ordered, so read and write barriers require a full
- * fence.
- */
-#if defined(__INTEL_COMPILER)
-# define pg_memory_barrier_impl() __mf()
-#elif defined(__GNUC__)
-# define pg_memory_barrier_impl() __asm__ __volatile__ ("mf" : : : "memory")
-#endif
-
-/* per architecture manual doubleword accesses have single copy atomicity */
-#define PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY
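The deleted header illustrates the per-architecture pattern: each arch-*.h may override pg_memory_barrier_impl() and related macros, and anything left undefined falls back to generic implementations. As a hedged illustration only (not part of this patch), a gcc-family port for another weakly ordered CPU could fill in the same macro with a compiler builtin:

    /* hypothetical arch header for a weakly ordered CPU, gcc-family compilers */
    #if defined(__GNUC__)
    #define pg_memory_barrier_impl()  __atomic_thread_fence(__ATOMIC_SEQ_CST)
    #endif
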
return ret;
}
-/* Only implemented on itanium and 64bit builds */
+/* Only implemented on 64-bit builds */
#ifdef _WIN64
#pragma intrinsic(_InterlockedExchangeAdd64)
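For context, the pragma above exposes the 64-bit fetch-add intrinsic that the MSVC atomics rely on. A hedged sketch of its use (hypothetical wrapper, 64-bit Windows builds only):

    #include <intrin.h>

    /* atomically add 'add' to *ptr and return the value *ptr held beforehand */
    static __int64
    fetch_add_64(volatile __int64 *ptr, __int64 add)
    {
        return _InterlockedExchangeAdd64(ptr, add);
    }
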
#endif /* __x86_64__ */
-#if defined(__ia64__) || defined(__ia64)
-/*
- * Intel Itanium, gcc or Intel's compiler.
- *
- * Itanium has weak memory ordering, but we rely on the compiler to enforce
- * strict ordering of accesses to volatile data. In particular, while the
- * xchg instruction implicitly acts as a memory barrier with 'acquire'
- * semantics, we do not have an explicit memory fence instruction in the
- * S_UNLOCK macro. We use a regular assignment to clear the spinlock, and
- * trust that the compiler marks the generated store instruction with the
- * ".rel" opcode.
- *
- * Testing shows that assumption to hold on gcc, although I could not find
- * any explicit statement on that in the gcc manual. In Intel's compiler,
- * the -m[no-]serialize-volatile option controls that, and testing shows that
- * it is enabled by default.
- *
- * While icc accepts gcc asm blocks on x86[_64], this is not true on ia64
- * (at least not in icc versions before 12.x). So we have to carry a separate
- * compiler-intrinsic-based implementation for it.
- */
-#define HAS_TEST_AND_SET
-
-typedef unsigned int slock_t;
-
-#define TAS(lock) tas(lock)
-
-/* On IA64, it's a win to use a non-locking test before the xchg proper */
-#define TAS_SPIN(lock) (*(lock) ? 1 : TAS(lock))
-
-#ifndef __INTEL_COMPILER
-
-static __inline__ int
-tas(volatile slock_t *lock)
-{
- long int ret;
-
- __asm__ __volatile__(
- " xchg4 %0=%1,%2 \n"
-: "=r"(ret), "+m"(*lock)
-: "r"(1)
-: "memory");
- return (int) ret;
-}
-
-#else /* __INTEL_COMPILER */
-
-static __inline__ int
-tas(volatile slock_t *lock)
-{
- int ret;
-
- ret = _InterlockedExchange(lock,1); /* this is a xchg asm macro */
-
- return ret;
-}
-
-/* icc can't use the regular gcc S_UNLOCK() macro either in this case */
-#define S_UNLOCK(lock) \
- do { __memory_barrier(); *(lock) = 0; } while (0)
-
-#endif /* __INTEL_COMPILER */
-#endif /* __ia64__ || __ia64 */
-
-
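The block removed above was a hand-rolled xchg-based test-and-set. The ARM/ARM64 case described next uses a compiler builtin instead; a minimal sketch of that style of spinlock primitive (hypothetical names, gcc/clang builtins):

    typedef int slock_t;

    /* returns the previous value: 0 means the lock was just acquired */
    #define TAS(lock)       __sync_lock_test_and_set((lock), 1)

    /* cheap non-locking test first, as the removed TAS_SPIN comment advised */
    #define TAS_SPIN(lock)  (*(lock) ? 1 : TAS(lock))

    /* store 0 with release semantics */
    #define S_UNLOCK(lock)  __sync_lock_release(lock)
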
/*
* On ARM and ARM64, we use __sync_lock_test_and_set(int *, int) if available.
*
# relevant to our platform will be included by atomics.h.
test "$f" = src/include/port/atomics/arch-arm.h && continue
test "$f" = src/include/port/atomics/arch-hppa.h && continue
- test "$f" = src/include/port/atomics/arch-ia64.h && continue
test "$f" = src/include/port/atomics/arch-ppc.h && continue
test "$f" = src/include/port/atomics/arch-x86.h && continue
test "$f" = src/include/port/atomics/fallback.h && continue
# relevant to our platform will be included by atomics.h.
test "$f" = src/include/port/atomics/arch-arm.h && continue
test "$f" = src/include/port/atomics/arch-hppa.h && continue
- test "$f" = src/include/port/atomics/arch-ia64.h && continue
test "$f" = src/include/port/atomics/arch-ppc.h && continue
test "$f" = src/include/port/atomics/arch-x86.h && continue
test "$f" = src/include/port/atomics/fallback.h && continue