llvm/openmp/runtime/src/kmp_os.h

/*
 * kmp_os.h -- KPTS runtime header file.
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef KMP_OS_H
#define KMP_OS_H

#include "kmp_config.h"
#include <atomic>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>

#define KMP_FTN_PLAIN 1
#define KMP_FTN_APPEND 2
#define KMP_FTN_UPPER 3
/*
#define KMP_FTN_PREPEND 4
#define KMP_FTN_UAPPEND 5
*/

#define KMP_PTR_SKIP (sizeof(void *))

/* -------------------------- Compiler variations ------------------------ */

#define KMP_OFF 0
#define KMP_ON 1

#define KMP_MEM_CONS_VOLATILE 0
#define KMP_MEM_CONS_FENCE 1

#ifndef KMP_MEM_CONS_MODEL
#define KMP_MEM_CONS_MODEL KMP_MEM_CONS_VOLATILE
#endif

#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif

#ifndef __has_attribute
#define __has_attribute(x) 0
#endif

/* ------------------------- Compiler recognition ---------------------- */
#define KMP_COMPILER_ICC 0
#define KMP_COMPILER_GCC 0
#define KMP_COMPILER_CLANG 0
#define KMP_COMPILER_MSVC 0
#define KMP_COMPILER_ICX 0

#if __INTEL_CLANG_COMPILER
#undef KMP_COMPILER_ICX
#define KMP_COMPILER_ICX 1
#elif defined(__INTEL_COMPILER)
#undef KMP_COMPILER_ICC
#define KMP_COMPILER_ICC 1
#elif defined(__clang__)
#undef KMP_COMPILER_CLANG
#define KMP_COMPILER_CLANG 1
#elif defined(__GNUC__)
#undef KMP_COMPILER_GCC
#define KMP_COMPILER_GCC 1
#elif defined(_MSC_VER)
#undef KMP_COMPILER_MSVC
#define KMP_COMPILER_MSVC 1
#else
#error Unknown compiler
#endif

#if (KMP_OS_LINUX || KMP_OS_WINDOWS || KMP_OS_FREEBSD || KMP_OS_NETBSD ||      \
     KMP_OS_DRAGONFLY || KMP_OS_AIX) &&                                        \
    !KMP_OS_WASI && !KMP_OS_EMSCRIPTEN
#define KMP_AFFINITY_SUPPORTED 1
#if KMP_OS_WINDOWS && KMP_ARCH_X86_64
#define KMP_GROUP_AFFINITY 1
#else
#define KMP_GROUP_AFFINITY 0
#endif
#else
#define KMP_AFFINITY_SUPPORTED 0
#define KMP_GROUP_AFFINITY 0
#endif

#if (KMP_OS_LINUX || (KMP_OS_FREEBSD && __FreeBSD_version >= 1301000))
#define KMP_HAVE_SCHED_GETCPU 1
#else
#define KMP_HAVE_SCHED_GETCPU 0
#endif

/* Check for quad-precision extension. */
#define KMP_HAVE_QUAD 0
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_COMPILER_ICC || KMP_COMPILER_ICX
/* _Quad is already defined for icc */
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#elif KMP_COMPILER_CLANG
/* Clang doesn't support a software-implemented
   128-bit extended precision type yet */
typedef long double _Quad;
#elif KMP_COMPILER_GCC
/* GCC on NetBSD lacks __multc3/__divtc3 builtins needed for quad until
   NetBSD 10.0 which ships with GCC 10.5 */
#if (!KMP_OS_NETBSD || __GNUC__ >= 10)
typedef __float128 _Quad;
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#endif
#elif KMP_COMPILER_MSVC
typedef long double _Quad;
#endif
#else
#if __LDBL_MAX_EXP__ >= 16384 && KMP_COMPILER_GCC
typedef long double _Quad;
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#endif
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#define KMP_USE_X87CONTROL 0
#if KMP_OS_WINDOWS
#define KMP_END_OF_LINE "\r\n"
typedef char kmp_int8;
typedef unsigned char kmp_uint8;
typedef short kmp_int16;
typedef unsigned short kmp_uint16;
typedef int kmp_int32;
typedef unsigned int kmp_uint32;
#define KMP_INT32_SPEC "d"
#define KMP_UINT32_SPEC "u"
#ifndef KMP_STRUCT64
typedef __int64 kmp_int64;
typedef unsigned __int64 kmp_uint64;
#define KMP_INT64_SPEC "I64d"
#define KMP_UINT64_SPEC "I64u"
#else
struct kmp_struct64 {
  kmp_int32 a, b;
};
typedef struct kmp_struct64 kmp_int64;
typedef struct kmp_struct64 kmp_uint64;
/* Not sure what to use for KMP_[U]INT64_SPEC here */
#endif
#if KMP_ARCH_X86 && KMP_MSVC_COMPAT
#undef KMP_USE_X87CONTROL
#define KMP_USE_X87CONTROL 1
#endif
#if KMP_ARCH_X86_64 || KMP_ARCH_AARCH64
#define KMP_INTPTR 1
typedef __int64 kmp_intptr_t;
typedef unsigned __int64 kmp_uintptr_t;
#define KMP_INTPTR_SPEC "I64d"
#define KMP_UINTPTR_SPEC "I64u"
#endif
#endif /* KMP_OS_WINDOWS */

#if KMP_OS_UNIX
#define KMP_END_OF_LINE "\n"
typedef char kmp_int8;
typedef unsigned char kmp_uint8;
typedef short kmp_int16;
typedef unsigned short kmp_uint16;
typedef int kmp_int32;
typedef unsigned int kmp_uint32;
typedef long long kmp_int64;
typedef unsigned long long kmp_uint64;
#define KMP_INT32_SPEC "d"
#define KMP_UINT32_SPEC "u"
#define KMP_INT64_SPEC "lld"
#define KMP_UINT64_SPEC "llu"
#endif /* KMP_OS_UNIX */

#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS || KMP_ARCH_WASM ||          \
    KMP_ARCH_PPC || KMP_ARCH_AARCH64_32
#define KMP_SIZE_T_SPEC KMP_UINT32_SPEC
#elif KMP_ARCH_X86_64 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 ||                 \
    KMP_ARCH_MIPS64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 ||             \
    KMP_ARCH_VE || KMP_ARCH_S390X
#define KMP_SIZE_T_SPEC KMP_UINT64_SPEC
#else
#error "Can't determine size_t printf format specifier."
#endif

#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_WASM || KMP_ARCH_PPC
#define KMP_SIZE_T_MAX (0xFFFFFFFF)
#else
#define KMP_SIZE_T_MAX (0xFFFFFFFFFFFFFFFF)
#endif

typedef size_t kmp_size_t;
typedef float kmp_real32;
typedef double kmp_real64;

#ifndef KMP_INTPTR
#define KMP_INTPTR 1
typedef long kmp_intptr_t;
typedef unsigned long kmp_uintptr_t;
#define KMP_INTPTR_SPEC "ld"
#define KMP_UINTPTR_SPEC "lu"
#endif

#ifdef BUILD_I8
typedef kmp_int64 kmp_int;
typedef kmp_uint64 kmp_uint;
#else
typedef kmp_int32 kmp_int;
typedef kmp_uint32 kmp_uint;
#endif /* BUILD_I8 */
#define KMP_INT_MAX ((kmp_int32)0x7FFFFFFF)
#define KMP_INT_MIN ((kmp_int32)0x80000000)

// stdarg handling
#if (KMP_ARCH_ARM || KMP_ARCH_X86_64 || KMP_ARCH_AARCH64 || KMP_ARCH_WASM) &&  \
    (KMP_OS_FREEBSD || KMP_OS_LINUX || KMP_OS_WASI)
typedef va_list *kmp_va_list;
#define kmp_va_deref(ap) (*(ap))
#define kmp_va_addr_of(ap) (&(ap))
#else
typedef va_list kmp_va_list;
#define kmp_va_deref(ap) (ap)
#define kmp_va_addr_of(ap) (ap)
#endif
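
// Illustrative sketch (not part of the upstream header): a hypothetical
// varargs helper written against kmp_va_list so the same code works whether
// kmp_va_list is va_list or va_list *. The function and variable names are
// invented for illustration only.
#if 0
static int __example_sum_ints(int n, kmp_va_list args) {
  int total = 0;
  for (int i = 0; i < n; ++i)
    total += va_arg(kmp_va_deref(args), int); // deref works for both layouts
  return total;
}
// Call site: va_start(ap, n); __example_sum_ints(n, kmp_va_addr_of(ap));
//            va_end(ap);
#endif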

#ifdef __cplusplus
// macros to cast out qualifiers and to re-interpret types
#define CCAST(type, var) const_cast<type>(var)
#define RCAST(type, var) reinterpret_cast<type>(var)
//-------------------------------------------------------------------------
// template for debug prints specification ( d, u, lld, llu ), and to obtain
// signed/unsigned flavors of a type
template <typename T> struct traits_t {};
// int
template <> struct traits_t<signed int> {};
// unsigned int
template <> struct traits_t<unsigned int> {};
// long
template <> struct traits_t<signed long> {};
// long long
template <> struct traits_t<signed long long> {};
// unsigned long long
template <> struct traits_t<unsigned long long> {};
//-------------------------------------------------------------------------
#else
#define CCAST(type, var) (type)(var)
#define RCAST(type, var) (type)(var)
#endif // __cplusplus

#define KMP_EXPORT extern /* export declaration in guide libraries */

#if __GNUC__ >= 4 && !defined(__MINGW32__)
#define __forceinline __inline
#endif

/* Check if the OS/arch can support user-level mwait */
// All mwait code tests for UMWAIT first, so it should only fall back to ring3
// MWAIT for KNL.
#define KMP_HAVE_MWAIT                                                         \
  ((KMP_ARCH_X86 || KMP_ARCH_X86_64) && (KMP_OS_LINUX || KMP_OS_WINDOWS) &&    \
   !KMP_MIC2)
#define KMP_HAVE_UMWAIT                                                        \
  ((KMP_ARCH_X86 || KMP_ARCH_X86_64) && (KMP_OS_LINUX || KMP_OS_WINDOWS) &&    \
   !KMP_MIC)

#if KMP_OS_WINDOWS
// Don't include everything related to NT status code, we'll do that explicitly
#define WIN32_NO_STATUS
#include <windows.h>

static inline int KMP_GET_PAGE_SIZE(void) {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwPageSize;
}
#else
#define KMP_GET_PAGE_SIZE() getpagesize()
#endif

#define PAGE_ALIGNED(_addr)                                                    \
  (!((size_t)_addr & (size_t)(KMP_GET_PAGE_SIZE() - 1)))
#define ALIGN_TO_PAGE(x)                                                       \
  (void *)(((size_t)(x)) & ~((size_t)(KMP_GET_PAGE_SIZE() - 1)))
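
// Illustrative sketch (not part of the upstream header): how the page macros
// above are typically combined; `buf` is a hypothetical allocation.
#if 0
void *buf = malloc(4 * KMP_GET_PAGE_SIZE());
int on_boundary = PAGE_ALIGNED(buf); // nonzero if buf starts on a page
void *page_start = ALIGN_TO_PAGE(buf); // round down to the enclosing page
#endif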

/* ---------- Support for cache alignment, padding, etc. ----------------*/

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus

#define INTERNODE_CACHE_LINE 4096 /* for multi-node systems */

/* Define the default size of the cache line */
#ifndef CACHE_LINE
#define CACHE_LINE 128 /* cache line size in bytes */
#else
#if (CACHE_LINE < 64) && !defined(KMP_OS_DARWIN)
// 2006-02-13: This produces too many warnings on OS X*. Disable for now
#warning CACHE_LINE is too small.
#endif
#endif /* CACHE_LINE */

#define KMP_CACHE_PREFETCH(ADDR) /* nothing */

// Define attribute that indicates that the fall through from the previous
// case label is intentional and should not be diagnosed by a compiler
//   Code from libcxx/include/__config
// Use a function like macro to imply that it must be followed by a semicolon
#if __cplusplus > 201402L && __has_cpp_attribute(fallthrough)
#define KMP_FALLTHROUGH() [[fallthrough]]
// icc cannot properly tell this attribute is absent so force off
#elif KMP_COMPILER_ICC
#define KMP_FALLTHROUGH() ((void)0)
#elif __has_cpp_attribute(clang::fallthrough)
#define KMP_FALLTHROUGH() [[clang::fallthrough]]
#elif __has_attribute(fallthrough) || __GNUC__ >= 7
#define KMP_FALLTHROUGH() __attribute__((__fallthrough__))
#else
#define KMP_FALLTHROUGH() ((void)0)
#endif
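
// Illustrative sketch (not part of the upstream header): annotating an
// intentional fall-through so compilers do not warn; `state`, do_setup() and
// do_work() are hypothetical.
#if 0
switch (state) {
case 0:
  do_setup();
  KMP_FALLTHROUGH();
case 1:
  do_work();
  break;
}
#endif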

#if KMP_HAVE_ATTRIBUTE_WAITPKG
#define KMP_ATTRIBUTE_TARGET_WAITPKG __attribute__((target("waitpkg")))
#else
#define KMP_ATTRIBUTE_TARGET_WAITPKG /* Nothing */
#endif

#if KMP_HAVE_ATTRIBUTE_RTM
#define KMP_ATTRIBUTE_TARGET_RTM __attribute__((target("rtm")))
#else
#define KMP_ATTRIBUTE_TARGET_RTM /* Nothing */
#endif

// Define attribute that indicates a function does not return
#if __cplusplus >= 201103L
#define KMP_NORETURN [[noreturn]]
#elif KMP_OS_WINDOWS
#define KMP_NORETURN __declspec(noreturn)
#else
#define KMP_NORETURN __attribute__((noreturn))
#endif

#if KMP_OS_WINDOWS && KMP_MSVC_COMPAT
#define KMP_ALIGN(bytes) __declspec(align(bytes))
#define KMP_THREAD_LOCAL __declspec(thread)
#define KMP_ALIAS /* Nothing */
#else
#define KMP_ALIGN(bytes) __attribute__((aligned(bytes)))
#define KMP_THREAD_LOCAL __thread
#define KMP_ALIAS(alias_of) __attribute__((alias(alias_of)))
#endif

#if KMP_HAVE_WEAK_ATTRIBUTE && !KMP_DYNAMIC_LIB
#define KMP_WEAK_ATTRIBUTE_EXTERNAL __attribute__((weak))
#else
#define KMP_WEAK_ATTRIBUTE_EXTERNAL /* Nothing */
#endif

#if KMP_HAVE_WEAK_ATTRIBUTE
#define KMP_WEAK_ATTRIBUTE_INTERNAL __attribute__((weak))
#else
#define KMP_WEAK_ATTRIBUTE_INTERNAL /* Nothing */
#endif

// Define KMP_VERSION_SYMBOL and KMP_EXPAND_NAME
#ifndef KMP_STR
#define KMP_STR(x) _KMP_STR(x)
#define _KMP_STR(x) #x
#endif

#ifdef KMP_USE_VERSION_SYMBOLS
// If using versioned symbols, KMP_EXPAND_NAME prepends
// __kmp_api_ to the real API name
#define KMP_EXPAND_NAME(api_name) _KMP_EXPAND_NAME(api_name)
#define _KMP_EXPAND_NAME(api_name) __kmp_api_##api_name
#define KMP_VERSION_SYMBOL(api_name, ver_num, ver_str)                         \
  _KMP_VERSION_SYMBOL(api_name, ver_num, ver_str, "VERSION")
#define _KMP_VERSION_SYMBOL(api_name, ver_num, ver_str, default_ver)

#define KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str)         \
  _KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str, "VERSION")
#define _KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str,          \
                                 default_ver)

#else // KMP_USE_VERSION_SYMBOLS
#define KMP_EXPAND_NAME(api_name) api_name
#define KMP_VERSION_SYMBOL(api_name, ver_num, ver_str) /* Nothing */
#define KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str)         \
  /* Nothing */
#endif // KMP_USE_VERSION_SYMBOLS

/* Temporary note: if performance testing of this passes, we can remove
   all references to KMP_DO_ALIGN and replace with KMP_ALIGN.  */
#define KMP_DO_ALIGN(bytes) KMP_ALIGN(bytes)
#define KMP_ALIGN_CACHE KMP_ALIGN(CACHE_LINE)
#define KMP_ALIGN_CACHE_INTERNODE KMP_ALIGN(INTERNODE_CACHE_LINE)

/* General purpose fence types for memory operations */
enum kmp_mem_fence_type {
  kmp_no_fence, /* No memory fence */
  kmp_acquire_fence, /* Acquire (read) memory fence */
  kmp_release_fence, /* Release (write) memory fence */
  kmp_full_fence /* Full (read+write) memory fence */
};

// Synchronization primitives

#if KMP_ASM_INTRINS && KMP_OS_WINDOWS && !((KMP_ARCH_AARCH64 || KMP_ARCH_ARM) && (KMP_COMPILER_CLANG || KMP_COMPILER_GCC))

#if KMP_MSVC_COMPAT && !KMP_COMPILER_CLANG
#pragma intrinsic(InterlockedExchangeAdd)
#pragma intrinsic(InterlockedCompareExchange)
#pragma intrinsic(InterlockedExchange)
#if !KMP_32_BIT_ARCH
#pragma intrinsic(InterlockedExchange64)
#endif
#endif

// Using InterlockedIncrement / InterlockedDecrement causes a library loading
// ordering problem, so we use InterlockedExchangeAdd instead.
#define KMP_TEST_THEN_INC32(p) InterlockedExchangeAdd((volatile long *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p)                                             \
  InterlockedExchangeAdd((volatile long *)(p), 1)
#define KMP_TEST_THEN_ADD4_32(p) InterlockedExchangeAdd((volatile long *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p)                                            \
  InterlockedExchangeAdd((volatile long *)(p), 4)
#define KMP_TEST_THEN_DEC32(p) InterlockedExchangeAdd((volatile long *)(p), -1)
#define KMP_TEST_THEN_DEC_ACQ32(p)                                             \
  InterlockedExchangeAdd((volatile long *)(p), -1)
#define KMP_TEST_THEN_ADD32(p, v)                                              \
  InterlockedExchangeAdd((volatile long *)(p), (v))

#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv)                                 \
  InterlockedCompareExchange((volatile long *)(p), (long)(sv), (long)(cv))

#define KMP_XCHG_FIXED32(p, v)                                                 \
  InterlockedExchange((volatile long *)(p), (long)(v))
#define KMP_XCHG_FIXED64(p, v)                                                 \
  InterlockedExchange64((volatile kmp_int64 *)(p), (kmp_int64)(v))

inline kmp_real32 KMP_XCHG_REAL32(volatile kmp_real32 *p, kmp_real32 v) {
  kmp_int32 tmp = InterlockedExchange((volatile long *)p, *(long *)&v);
  return *(kmp_real32 *)&tmp;
}

#define KMP_TEST_THEN_OR8(p, v) __kmp_test_then_or8((p), (v))
#define KMP_TEST_THEN_AND8(p, v) __kmp_test_then_and8((p), (v))
#define KMP_TEST_THEN_OR32(p, v)                                               \
  __kmp_test_then_or32((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_AND32(p, v)                                              \
  __kmp_test_then_and32((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_OR64(p, v)                                               \
  __kmp_test_then_or64((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#define KMP_TEST_THEN_AND64(p, v)                                              \
  __kmp_test_then_and64((volatile kmp_uint64 *)(p), (kmp_uint64)(v))

extern kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int32 __kmp_test_then_add32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 v);
extern kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 v);

#if KMP_ARCH_AARCH64 && KMP_COMPILER_MSVC && !KMP_COMPILER_CLANG
#define KMP_TEST_THEN_INC64(p) _InterlockedExchangeAdd64((p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p) _InterlockedExchangeAdd64_acq((p), 1LL)
#define KMP_TEST_THEN_ADD4_64(p) _InterlockedExchangeAdd64((p), 4LL)
// #define KMP_TEST_THEN_ADD4_ACQ64(p) _InterlockedExchangeAdd64_acq((p), 4LL)
// #define KMP_TEST_THEN_DEC64(p) _InterlockedExchangeAdd64((p), -1LL)
// #define KMP_TEST_THEN_DEC_ACQ64(p) _InterlockedExchangeAdd64_acq((p), -1LL)
// #define KMP_TEST_THEN_ADD8(p, v) _InterlockedExchangeAdd8((p), (v))
#define KMP_TEST_THEN_ADD64(p, v) _InterlockedExchangeAdd64((p), (v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __kmp_compare_and_store_acq8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __kmp_compare_and_store_rel8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __kmp_compare_and_store_acq16((p), (cv), (sv))
/*
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __kmp_compare_and_store_rel16((p), (cv), (sv))
*/
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __kmp_compare_and_store_acq32((volatile kmp_int32 *)(p), (kmp_int32)(cv),    \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __kmp_compare_and_store_rel32((volatile kmp_int32 *)(p), (kmp_int32)(cv),    \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __kmp_compare_and_store_acq64((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __kmp_compare_and_store_rel64((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store_ptr((void *volatile *)(p), (void *)(cv), (void *)(sv))

//  KMP_COMPARE_AND_STORE expects this order:       pointer, compare, exchange
// _InterlockedCompareExchange expects this order:  pointer, exchange, compare
// KMP_COMPARE_AND_STORE also returns a bool indicating a successful write. A
// write is successful if the return value of _InterlockedCompareExchange is the
// same as the compare value.
inline kmp_int8 __kmp_compare_and_store_acq8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv) {
  return _InterlockedCompareExchange8_acq(p, sv, cv) == cv;
}

inline kmp_int8 __kmp_compare_and_store_rel8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv) {
  return _InterlockedCompareExchange8_rel(p, sv, cv) == cv;
}

inline kmp_int16 __kmp_compare_and_store_acq16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv) {
  return _InterlockedCompareExchange16_acq(p, sv, cv) == cv;
}

inline kmp_int16 __kmp_compare_and_store_rel16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv) {
  return _InterlockedCompareExchange16_rel(p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_acq32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv) {
  return _InterlockedCompareExchange_acq((volatile long *)p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_rel32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv) {
  return _InterlockedCompareExchange_rel((volatile long *)p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_acq64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv) {
  return _InterlockedCompareExchange64_acq(p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_rel64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv) {
  return _InterlockedCompareExchange64_rel(p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_ptr(void *volatile *p, void *cv,
                                             void *sv) {
  return _InterlockedCompareExchangePointer(p, sv, cv) == cv;
}

// The _RET versions return the value instead of a bool

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  _InterlockedCompareExchange8((p), (sv), (cv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  _InterlockedCompareExchange16((p), (sv), (cv))

#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  _InterlockedCompareExchange64((p), (sv), (cv))


#define KMP_XCHG_FIXED8(p, v)                                                  \
  _InterlockedExchange8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_XCHG_FIXED16(p, v) _InterlockedExchange16((p), (v))
#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v))

inline kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v) {
  kmp_int64 tmp =
      _InterlockedExchange64((volatile kmp_int64 *)p, *(kmp_int64 *)&v);
  return *(kmp_real64 *)&tmp;
}

#else // !KMP_ARCH_AARCH64

// Routines that we still need to implement in assembly.
extern kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 v);

extern kmp_int8 __kmp_compare_and_store8(volatile kmp_int8 *p, kmp_int8 cv,
                                         kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store16(volatile kmp_int16 *p, kmp_int16 cv,
                                           kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store32(volatile kmp_int32 *p, kmp_int32 cv,
                                           kmp_int32 sv);
extern kmp_int32 __kmp_compare_and_store64(volatile kmp_int64 *p, kmp_int64 cv,
                                           kmp_int64 sv);
extern kmp_int8 __kmp_compare_and_store_ret8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store_ret16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store_ret32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv);
extern kmp_int64 __kmp_compare_and_store_ret64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv);

extern kmp_int8 __kmp_xchg_fixed8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int16 __kmp_xchg_fixed16(volatile kmp_int16 *p, kmp_int16 v);
extern kmp_int32 __kmp_xchg_fixed32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_int64 __kmp_xchg_fixed64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_real32 __kmp_xchg_real32(volatile kmp_real32 *p, kmp_real32 v);
extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);

//#define KMP_TEST_THEN_INC32(p) __kmp_test_then_add32((p), 1)
//#define KMP_TEST_THEN_INC_ACQ32(p) __kmp_test_then_add32((p), 1)
#define KMP_TEST_THEN_INC64(p) __kmp_test_then_add64((p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p) __kmp_test_then_add64((p), 1LL)
//#define KMP_TEST_THEN_ADD4_32(p) __kmp_test_then_add32((p), 4)
//#define KMP_TEST_THEN_ADD4_ACQ32(p) __kmp_test_then_add32((p), 4)
#define KMP_TEST_THEN_ADD4_64(p) __kmp_test_then_add64((p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p) __kmp_test_then_add64((p), 4LL)
//#define KMP_TEST_THEN_DEC32(p) __kmp_test_then_add32((p), -1)
//#define KMP_TEST_THEN_DEC_ACQ32(p) __kmp_test_then_add32((p), -1)
#define KMP_TEST_THEN_DEC64(p) __kmp_test_then_add64((p), -1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p) __kmp_test_then_add64((p), -1LL)
//#define KMP_TEST_THEN_ADD32(p, v) __kmp_test_then_add32((p), (v))
#define KMP_TEST_THEN_ADD8(p, v) __kmp_test_then_add8((p), (v))
#define KMP_TEST_THEN_ADD64(p, v) __kmp_test_then_add64((p), (v))


#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __kmp_compare_and_store8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __kmp_compare_and_store8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __kmp_compare_and_store16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __kmp_compare_and_store16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __kmp_compare_and_store64((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __kmp_compare_and_store64((p), (cv), (sv))

#if KMP_ARCH_X86
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#else /* 64 bit pointers */
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#endif /* KMP_ARCH_X86 */

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  __kmp_compare_and_store_ret8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  __kmp_compare_and_store_ret16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  __kmp_compare_and_store_ret64((p), (cv), (sv))

#define KMP_XCHG_FIXED8(p, v)                                                  \
  __kmp_xchg_fixed8((volatile kmp_int8 *)(p), (kmp_int8)(v));
#define KMP_XCHG_FIXED16(p, v) __kmp_xchg_fixed16((p), (v));
//#define KMP_XCHG_FIXED32(p, v) __kmp_xchg_fixed32((p), (v));
//#define KMP_XCHG_FIXED64(p, v) __kmp_xchg_fixed64((p), (v));
//#define KMP_XCHG_REAL32(p, v) __kmp_xchg_real32((p), (v));
#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));
#endif

#elif (KMP_ASM_INTRINS && KMP_OS_UNIX) || !(KMP_ARCH_X86 || KMP_ARCH_X86_64)

/* cast p to correct type so that proper intrinsic will be used */
#define KMP_TEST_THEN_INC32(p)                                                 \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p)                                             \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 1)
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_INC64(p)                                                 \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_INC_ACQ64(p)                                             \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_INC64(p)                                                 \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p)                                             \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 1LL)
#endif
#define KMP_TEST_THEN_ADD4_32(p)                                               \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p)                                            \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 4)
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_ADD4_64(p)                                               \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 4LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_ADD4_ACQ64(p)                                            \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 4LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_DEC64(p)                                                 \
  __atomic_fetch_sub((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_DEC_ACQ64(p)                                             \
  __atomic_fetch_sub((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_ADD4_64(p)                                               \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p)                                            \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_DEC64(p)                                                 \
  __sync_fetch_and_sub((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p)                                             \
  __sync_fetch_and_sub((volatile kmp_int64 *)(p), 1LL)
#endif
#define KMP_TEST_THEN_DEC32(p)                                                 \
  __sync_fetch_and_sub((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_DEC_ACQ32(p)                                             \
  __sync_fetch_and_sub((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_ADD8(p, v)                                               \
  __sync_fetch_and_add((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_ADD32(p, v)                                              \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), (kmp_int32)(v))
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_ADD64(p, v)                                              \
  __atomic_fetch_add((volatile kmp_uint64 *)(p), (kmp_uint64)(v),              \
                     __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_ADD64(p, v)                                              \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), (kmp_int64)(v))
#endif

#define KMP_TEST_THEN_OR8(p, v)                                                \
  __sync_fetch_and_or((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_AND8(p, v)                                               \
  __sync_fetch_and_and((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_OR32(p, v)                                               \
  __sync_fetch_and_or((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_AND32(p, v)                                              \
  __sync_fetch_and_and((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_OR64(p, v)                                               \
  __atomic_fetch_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v),               \
                    __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_AND64(p, v)                                              \
  __atomic_fetch_and((volatile kmp_uint64 *)(p), (kmp_uint64)(v),              \
                     __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_OR64(p, v)                                               \
  __sync_fetch_and_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#define KMP_TEST_THEN_AND64(p, v)                                              \
  __sync_fetch_and_and((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#endif

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __sync_bool_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv),     \
                               (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __sync_bool_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv),     \
                               (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv),   \
                               (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv),   \
                               (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv),   \
                               (kmp_uint32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv),   \
                               (kmp_uint32)(sv))
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __sync_bool_compare_and_swap((void *volatile *)(p), (void *)(cv),            \
                               (void *)(sv))

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  __sync_val_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv),      \
                              (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  __sync_val_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv),    \
                              (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv)                                 \
  __sync_val_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv),    \
                              (kmp_uint32)(sv))
#if KMP_ARCH_MIPS
static inline bool mips_sync_bool_compare_and_swap(volatile kmp_uint64 *p,
                                                   kmp_uint64 cv,
                                                   kmp_uint64 sv) {
  return __atomic_compare_exchange(p, &cv, &sv, false, __ATOMIC_SEQ_CST,
                                   __ATOMIC_SEQ_CST);
}
static inline bool mips_sync_val_compare_and_swap(volatile kmp_uint64 *p,
                                                  kmp_uint64 cv,
                                                  kmp_uint64 sv) {
  __atomic_compare_exchange(p, &cv, &sv, false, __ATOMIC_SEQ_CST,
                            __ATOMIC_SEQ_CST);
  return cv;
}
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  mips_sync_bool_compare_and_swap((volatile kmp_uint64 *)(p),                  \
                                  (kmp_uint64)(cv), (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  mips_sync_bool_compare_and_swap((volatile kmp_uint64 *)(p),                  \
                                  (kmp_uint64)(cv), (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  mips_sync_val_compare_and_swap((volatile kmp_uint64 *)(p),                   \
                                 (kmp_uint64)(cv), (kmp_uint64)(sv))
#else
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv),   \
                               (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv),   \
                               (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  __sync_val_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv),    \
                              (kmp_uint64)(sv))
#endif

#if KMP_OS_DARWIN && defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 1800
#define KMP_XCHG_FIXED8(p, v)                                                  \
  __atomic_exchange_1((volatile kmp_int8 *)(p), (kmp_int8)(v),                 \
                      __ATOMIC_SEQ_CST)
#else
#define KMP_XCHG_FIXED8(p, v)                                                  \
  __sync_lock_test_and_set((volatile kmp_int8 *)(p), (kmp_int8)(v))
#endif
#define KMP_XCHG_FIXED16(p, v) __sync_lock_test_and_set((p), (v))
#define KMP_XCHG_FIXED32(p, v) __sync_lock_test_and_set((p), (v))
#define KMP_XCHG_FIXED64(p, v) __sync_lock_test_and_set((p), (v))

inline kmp_real32 KMP_XCHG_REAL32(volatile kmp_real32 *p, kmp_real32 v) {
  kmp_int32 tmp =
      __sync_lock_test_and_set((volatile kmp_int32 *)(p), *(kmp_int32 *)&v);
  return *(kmp_real32 *)&tmp;
}

inline kmp_real64 KMP_XCHG_REAL64(volatile kmp_real64 *p, kmp_real64 v) {
  kmp_int64 tmp =
      __sync_lock_test_and_set((volatile kmp_int64 *)(p), *(kmp_int64 *)&v);
  return *(kmp_real64 *)&tmp;
}

#else

extern kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int32 __kmp_test_then_add32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 v);
extern kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 v);

extern kmp_int8 __kmp_compare_and_store8(volatile kmp_int8 *p, kmp_int8 cv,
                                         kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store16(volatile kmp_int16 *p, kmp_int16 cv,
                                           kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store32(volatile kmp_int32 *p, kmp_int32 cv,
                                           kmp_int32 sv);
extern kmp_int32 __kmp_compare_and_store64(volatile kmp_int64 *p, kmp_int64 cv,
                                           kmp_int64 sv);
extern kmp_int8 __kmp_compare_and_store_ret8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store_ret16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store_ret32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv);
extern kmp_int64 __kmp_compare_and_store_ret64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv);

extern kmp_int8 __kmp_xchg_fixed8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int16 __kmp_xchg_fixed16(volatile kmp_int16 *p, kmp_int16 v);
extern kmp_int32 __kmp_xchg_fixed32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_int64 __kmp_xchg_fixed64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_real32 __kmp_xchg_real32(volatile kmp_real32 *p, kmp_real32 v);
extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);

#define KMP_TEST_THEN_INC32(p)                                                 \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p)                                             \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC64(p)                                                 \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p)                                             \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_ADD4_32(p)                                               \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p)                                            \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_64(p)                                               \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p)                                            \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_DEC32(p)                                                 \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), -1)
#define KMP_TEST_THEN_DEC_ACQ32(p)                                             \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), -1)
#define KMP_TEST_THEN_DEC64(p)                                                 \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), -1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p)                                             \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), -1LL)
#define KMP_TEST_THEN_ADD8(p, v)                                               \
  __kmp_test_then_add8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_ADD32(p, v)                                              \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), (kmp_int32)(v))
#define KMP_TEST_THEN_ADD64(p, v)                                              \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), (kmp_int64)(v))

#define KMP_TEST_THEN_OR8(p, v)                                                \
  __kmp_test_then_or8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_AND8(p, v)                                               \
  __kmp_test_then_and8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_OR32(p, v)                                               \
  __kmp_test_then_or32((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_AND32(p, v)                                              \
  __kmp_test_then_and32((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_OR64(p, v)                                               \
  __kmp_test_then_or64((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#define KMP_TEST_THEN_AND64(p, v)                                              \
  __kmp_test_then_and64((volatile kmp_uint64 *)(p), (kmp_uint64)(v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __kmp_compare_and_store8((volatile kmp_int8 *)(p), (kmp_int8)(cv),           \
                           (kmp_int8)(sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __kmp_compare_and_store8((volatile kmp_int8 *)(p), (kmp_int8)(cv),           \
                           (kmp_int8)(sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __kmp_compare_and_store16((volatile kmp_int16 *)(p), (kmp_int16)(cv),        \
                            (kmp_int16)(sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __kmp_compare_and_store16((volatile kmp_int16 *)(p), (kmp_int16)(cv),        \
                            (kmp_int16)(sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))

#if KMP_ARCH_X86
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#else /* 64 bit pointers */
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#endif /* KMP_ARCH_X86 */

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  __kmp_compare_and_store_ret8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  __kmp_compare_and_store_ret16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv)                                 \
  __kmp_compare_and_store_ret32((volatile kmp_int32 *)(p), (kmp_int32)(cv),    \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  __kmp_compare_and_store_ret64((p), (cv), (sv))

#define KMP_XCHG_FIXED8(p, v)                                                  \
  __kmp_xchg_fixed8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_XCHG_FIXED16(p, v) __kmp_xchg_fixed16((p), (v))
#define KMP_XCHG_FIXED32(p, v) __kmp_xchg_fixed32((p), (v))
#define KMP_XCHG_FIXED64(p, v) __kmp_xchg_fixed64((p), (v))
#define KMP_XCHG_REAL32(p, v) __kmp_xchg_real32((p), (v))
#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v))

#endif /* KMP_ASM_INTRINS */

/* ------------- relaxed consistency memory model stuff ------------------ */

#if KMP_OS_WINDOWS
#ifdef __ABSOFT_WIN
#define KMP_MB() asm("nop")
#define KMP_IMB() asm("nop")
#else
#define KMP_MB() /* _asm{ nop } */
#define KMP_IMB() /* _asm{ nop } */
#endif
#endif /* KMP_OS_WINDOWS */

#if KMP_ARCH_PPC64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64 || KMP_ARCH_MIPS ||     \
    KMP_ARCH_MIPS64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 ||             \
    KMP_ARCH_VE || KMP_ARCH_S390X || KMP_ARCH_PPC || KMP_ARCH_AARCH64_32
#if KMP_OS_WINDOWS
#undef KMP_MB
#define KMP_MB() std::atomic_thread_fence(std::memory_order_seq_cst)
#else /* !KMP_OS_WINDOWS */
#define KMP_MB() __sync_synchronize()
#endif
#endif

#ifndef KMP_MB
#define KMP_MB() /* nothing to do */
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_MIC
// fence-style instructions do not exist, but lock; xaddl $0,(%rsp) can be used.
// We shouldn't need it, though, since the ABI rules require that
// * If the compiler generates NGO stores it also generates the fence
// * If users hand-code NGO stores they should insert the fence
// therefore no incomplete unordered stores should be visible.
#define KMP_MFENCE() KMP_MB()
#define KMP_SFENCE() KMP_MB()
#else
#if KMP_COMPILER_ICC || KMP_COMPILER_ICX
#define KMP_MFENCE_() _mm_mfence()
#define KMP_SFENCE_() _mm_sfence()
#elif KMP_COMPILER_MSVC
#define KMP_MFENCE_() _mm_mfence()
#define KMP_SFENCE_() _mm_sfence()
#else
#define KMP_MFENCE_() __sync_synchronize()
#define KMP_SFENCE_() __sync_synchronize()
#endif
#define KMP_MFENCE()                                                           \
  if (UNLIKELY(!__kmp_cpuinfo.initialized)) {                                  \
    __kmp_query_cpuid(&__kmp_cpuinfo);                                         \
  }                                                                            \
  if (__kmp_cpuinfo.flags.sse2) {                                              \
    KMP_MFENCE_();                                                             \
  }
#define KMP_SFENCE() KMP_SFENCE_()
#endif
#else
#define KMP_MFENCE() KMP_MB()
#define KMP_SFENCE() KMP_MB()
#endif

#ifndef KMP_IMB
#define KMP_IMB()
#endif

#ifndef KMP_ST_REL32
#define KMP_ST_REL32(A, D) (*(A) = (D))
#endif

#ifndef KMP_ST_REL64
#define KMP_ST_REL64(A, D) (*(A) = (D))
#endif

#ifndef KMP_LD_ACQ32
#define KMP_LD_ACQ32(A) (*(A))
#endif

#ifndef KMP_LD_ACQ64
#define KMP_LD_ACQ64(A) (*(A))
#endif

/* ------------------------------------------------------------------------ */
// FIXME - maybe this should this be
//
// #define TCR_4(a)    (*(volatile kmp_int32 *)(&a))
// #define TCW_4(a,b)  (a) = (*(volatile kmp_int32 *)&(b))
//
// #define TCR_8(a)    (*(volatile kmp_int64 *)(a))
// #define TCW_8(a,b)  (a) = (*(volatile kmp_int64 *)(&b))
//
// I'm fairly certain this is the correct thing to do, but I'm afraid
// of performance regressions.

#define TCR_1(a) (a)
#define TCW_1(a, b) (a) = (b)
#define TCR_4(a) (a)
#define TCW_4(a, b) (a) = (b)
#define TCI_4(a) (++(a))
#define TCD_4(a) (--(a))
#define TCR_8(a) (a)
#define TCW_8(a, b) (a) = (b)
#define TCI_8(a) (++(a))
#define TCD_8(a) (--(a))
#define TCR_SYNC_4(a) (a)
#define TCW_SYNC_4(a, b) (a) = (b)
#define TCX_SYNC_4(a, b, c)                                                    \
  KMP_COMPARE_AND_STORE_REL32((volatile kmp_int32 *)(volatile void *)&(a),     \
                              (kmp_int32)(b), (kmp_int32)(c))
#define TCR_SYNC_8(a) (a)
#define TCW_SYNC_8(a, b) (a) = (b)
#define TCX_SYNC_8(a, b, c)                                                    \
  KMP_COMPARE_AND_STORE_REL64((volatile kmp_int64 *)(volatile void *)&(a),     \
                              (kmp_int64)(b), (kmp_int64)(c))
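
// Illustrative sketch (not part of the upstream header): TCR_/TCW_ give typed
// copy-in/copy-out accesses of a shared location; `flag` is hypothetical.
#if 0
kmp_int32 flag = 0;
TCW_4(flag, 1); // typed write of a 4-byte value
kmp_int32 observed = TCR_4(flag); // typed read of the same location
#endif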

#if KMP_ARCH_X86 || KMP_ARCH_MIPS || KMP_ARCH_WASM || KMP_ARCH_PPC
// What about ARM?
#define TCR_PTR(a) ((void *)TCR_4(a))
#define TCW_PTR(a, b) TCW_4((a), (b))
#define TCR_SYNC_PTR(a) ((void *)TCR_SYNC_4(a))
#define TCW_SYNC_PTR(a, b) TCW_SYNC_4((a), (b))
#define TCX_SYNC_PTR(a, b, c) ((void *)TCX_SYNC_4((a), (b), (c)))

#else /* 64 bit pointers */

#define TCR_PTR(a) ((void *)TCR_8(a))
#define TCW_PTR(a, b) TCW_8((a), (b))
#define TCR_SYNC_PTR(a) ((void *)TCR_SYNC_8(a))
#define TCW_SYNC_PTR(a, b) TCW_SYNC_8((a), (b))
#define TCX_SYNC_PTR(a, b, c) ((void *)TCX_SYNC_8((a), (b), (c)))

#endif /* KMP_ARCH_X86 */

/* If these FTN_{TRUE,FALSE} values change, may need to change several places
   where they are used to check that language is Fortran, not C. */

#ifndef FTN_TRUE
#define FTN_TRUE TRUE
#endif

#ifndef FTN_FALSE
#define FTN_FALSE FALSE
#endif

typedef void (*microtask_t)(int *gtid, int *npr, ...);

#ifdef USE_VOLATILE_CAST
#define VOLATILE_CAST(x) (volatile x)
#else
#define VOLATILE_CAST(x) (x)
#endif

#define KMP_WAIT __kmp_wait_4
#define KMP_WAIT_PTR __kmp_wait_4_ptr
#define KMP_EQ __kmp_eq_4
#define KMP_NEQ __kmp_neq_4
#define KMP_LT __kmp_lt_4
#define KMP_GE __kmp_ge_4
#define KMP_LE __kmp_le_4

/* Workaround for Intel(R) 64 code gen bug when taking address of static array
 * (Intel(R) 64 Tracker #138) */
#if (KMP_ARCH_X86_64 || KMP_ARCH_PPC64) && KMP_OS_LINUX
#define STATIC_EFI2_WORKAROUND
#else
#define STATIC_EFI2_WORKAROUND static
#endif

// Support of BGET usage
#ifndef KMP_USE_BGET
#define KMP_USE_BGET 1
#endif

// Switches for OSS builds
#ifndef USE_CMPXCHG_FIX
#define USE_CMPXCHG_FIX 1
#endif

// Enable dynamic user lock
#define KMP_USE_DYNAMIC_LOCK 1

// Enable Intel(R) Transactional Synchronization Extensions (Intel(R) TSX) if
// dynamic user lock is turned on
#if KMP_USE_DYNAMIC_LOCK
// Visual studio can't handle the asm sections in this code
#define KMP_USE_TSX (KMP_ARCH_X86 || KMP_ARCH_X86_64) && !KMP_COMPILER_MSVC
#ifdef KMP_USE_ADAPTIVE_LOCKS
#undef KMP_USE_ADAPTIVE_LOCKS
#endif
#define KMP_USE_ADAPTIVE_LOCKS KMP_USE_TSX
#endif

// Enable tick time conversion of ticks to seconds
#if KMP_STATS_ENABLED
#define KMP_HAVE_TICK_TIME                                                     \
  (KMP_OS_LINUX && (KMP_MIC || KMP_ARCH_X86 || KMP_ARCH_X86_64))
#endif

// Warning levels
enum kmp_warnings_level {
  kmp_warnings_off = 0, /* No warnings */
  kmp_warnings_low, /* Minimal warnings (default) */
  kmp_warnings_explicit = 6, /* Explicitly set to ON - more warnings */
  kmp_warnings_verbose /* reserved */
};

#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus

// Safe C API
#include "kmp_safe_c_api.h"

// Macros for C++11 atomic functions
#define KMP_ATOMIC_LD(p, order) (p)->load(std::memory_order_##order)
#define KMP_ATOMIC_OP(op, p, v, order) (p)->op(v, std::memory_order_##order)

// For non-default load/store
#define KMP_ATOMIC_LD_ACQ(p) KMP_ATOMIC_LD(p, acquire)
#define KMP_ATOMIC_LD_RLX(p) KMP_ATOMIC_LD(p, relaxed)
#define KMP_ATOMIC_ST_REL(p, v) KMP_ATOMIC_OP(store, p, v, release)
#define KMP_ATOMIC_ST_RLX(p, v) KMP_ATOMIC_OP(store, p, v, relaxed)

// For non-default fetch_<op>
#define KMP_ATOMIC_ADD(p, v) KMP_ATOMIC_OP(fetch_add, p, v, acq_rel)
#define KMP_ATOMIC_SUB(p, v) KMP_ATOMIC_OP(fetch_sub, p, v, acq_rel)
#define KMP_ATOMIC_AND(p, v) KMP_ATOMIC_OP(fetch_and, p, v, acq_rel)
#define KMP_ATOMIC_OR(p, v) KMP_ATOMIC_OP(fetch_or, p, v, acq_rel)
#define KMP_ATOMIC_INC(p) KMP_ATOMIC_OP(fetch_add, p, 1, acq_rel)
#define KMP_ATOMIC_DEC(p) KMP_ATOMIC_OP(fetch_sub, p, 1, acq_rel)
#define KMP_ATOMIC_ADD_RLX(p, v) KMP_ATOMIC_OP(fetch_add, p, v, relaxed)
#define KMP_ATOMIC_INC_RLX(p) KMP_ATOMIC_OP(fetch_add, p, 1, relaxed)
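
// Illustrative sketch (not part of the upstream header): a relaxed statistics
// counter built on the wrappers above; `hit_count` is hypothetical.
#if 0
static std::atomic<kmp_int64> hit_count(0);
KMP_ATOMIC_INC_RLX(&hit_count); // fetch_add(1, memory_order_relaxed)
kmp_int64 snapshot = KMP_ATOMIC_LD_RLX(&hit_count);
#endif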

// Callers of the following functions cannot see the side effect on "expected".
template <typename T>
bool __kmp_atomic_compare_store(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_acq_rel, std::memory_order_relaxed);
}

template <typename T>
bool __kmp_atomic_compare_store_acq(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_acquire, std::memory_order_relaxed);
}

template <typename T>
bool __kmp_atomic_compare_store_rel(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_release, std::memory_order_relaxed);
}
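
// Illustrative sketch (not part of the upstream header): claiming a flag with
// acquire semantics and releasing it with a release store; `claim_flag` is
// hypothetical.
#if 0
static std::atomic<kmp_int32> claim_flag(0);
if (__kmp_atomic_compare_store_acq(&claim_flag, (kmp_int32)0, (kmp_int32)1)) {
  /* ... owner-only work ... */
  KMP_ATOMIC_ST_REL(&claim_flag, (kmp_int32)0);
}
#endif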

// Symbol lookup on Linux/Windows
#if KMP_OS_WINDOWS
extern void *__kmp_lookup_symbol(const char *name, bool next = false);
#define KMP_DLSYM(name) __kmp_lookup_symbol(name)
#define KMP_DLSYM_NEXT(name) __kmp_lookup_symbol(name, true)
#elif KMP_OS_WASI || KMP_OS_EMSCRIPTEN
#define KMP_DLSYM(name) nullptr
#define KMP_DLSYM_NEXT(name) nullptr
#else
#define KMP_DLSYM(name) dlsym(RTLD_DEFAULT, name)
#define KMP_DLSYM_NEXT(name) dlsym(RTLD_NEXT, name)
#endif

// MSVC doesn't have this, but clang/clang-cl does.
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif

// Same as LLVM_BUILTIN_UNREACHABLE. States that it is UB to reach this point.
#if __has_builtin(__builtin_unreachable) || defined(__GNUC__)
#define KMP_BUILTIN_UNREACHABLE __builtin_unreachable()
#elif defined(_MSC_VER)
#define KMP_BUILTIN_UNREACHABLE __assume(false)
#else
#define KMP_BUILTIN_UNREACHABLE
#endif

#endif /* KMP_OS_H */