chromium/third_party/eigen3/src/Eigen/src/Core/util/Memory.h

// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <[email protected]>
// Copyright (C) 2008-2009 Benoit Jacob <[email protected]>
// Copyright (C) 2009 Kenneth Riddile <[email protected]>
// Copyright (C) 2010 Hauke Heibel <[email protected]>
// Copyright (C) 2010 Thomas Capricelli <[email protected]>
// Copyright (C) 2013 Pavel Holoborodko <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

/*****************************************************************************
*** Platform checks for aligned malloc functions                           ***
*****************************************************************************/

#ifndef EIGEN_MEMORY_H
#define EIGEN_MEMORY_H

#ifndef EIGEN_MALLOC_ALREADY_ALIGNED

// Try to determine automatically if malloc is already aligned.

// On 64-bit systems, glibc's malloc returns 16-byte-aligned pointers, see:
//   http://www.gnu.org/s/libc/manual/html_node/Aligned-Memory-Blocks.html
// This is true at least since glibc 2.8.
// This leaves the question of how to detect 64-bit. According to this document,
//   http://gcc.fyxm.net/summit/2003/Porting%20to%2064%20bit.pdf
// page 114, "[The] LP64 model [...] is used by all 64-bit UNIX ports" so it's indeed
// quite safe, at least within the context of glibc, to equate 64-bit with LP64.
#if defined(__GLIBC__) && ((__GLIBC__ >= 2 && __GLIBC_MINOR__ >= 8) || __GLIBC__ > 2) && defined(__LP64__) && \
    !defined(__SANITIZE_ADDRESS__) && (EIGEN_DEFAULT_ALIGN_BYTES == 16)
#define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 1
#else
#define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 0
#endif

// FreeBSD 6 seems to have 16-byte aligned malloc
//   See http://svn.freebsd.org/viewvc/base/stable/6/lib/libc/stdlib/malloc.c?view=markup
// FreeBSD 7 seems to have 16-byte aligned malloc except on ARM and MIPS architectures
//   See http://svn.freebsd.org/viewvc/base/stable/7/lib/libc/stdlib/malloc.c?view=markup
#if defined(__FreeBSD__) && !(EIGEN_ARCH_ARM || EIGEN_ARCH_MIPS) && (EIGEN_DEFAULT_ALIGN_BYTES == 16)
#define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 1
#else
#define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 0
#endif

#if (EIGEN_OS_MAC && (EIGEN_DEFAULT_ALIGN_BYTES == 16)) || (EIGEN_OS_WIN64 && (EIGEN_DEFAULT_ALIGN_BYTES == 16)) || \
    EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED || EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED
#define EIGEN_MALLOC_ALREADY_ALIGNED 1
#else
#define EIGEN_MALLOC_ALREADY_ALIGNED 0
#endif

#endif

#ifndef EIGEN_MALLOC_CHECK_THREAD_LOCAL

// Check whether we can use the thread_local keyword to allow or disallow
// allocating memory with per-thread granularity, by means of the
// set_is_malloc_allowed() function.
#ifndef EIGEN_AVOID_THREAD_LOCAL

#if ((EIGEN_COMP_GNUC) || __has_feature(cxx_thread_local) || EIGEN_COMP_MSVC >= 1900) && \
    !defined(EIGEN_GPU_COMPILE_PHASE)
#define EIGEN_MALLOC_CHECK_THREAD_LOCAL thread_local
#else
#define EIGEN_MALLOC_CHECK_THREAD_LOCAL
#endif

#else  // EIGEN_AVOID_THREAD_LOCAL
#define EIGEN_MALLOC_CHECK_THREAD_LOCAL
#endif  // EIGEN_AVOID_THREAD_LOCAL

#endif

// IWYU pragma: private
#include "../InternalHeaderCheck.h"

namespace Eigen {

namespace internal {

/*****************************************************************************
*** Implementation of portable aligned versions of malloc/free/realloc     ***
*****************************************************************************/

#ifdef EIGEN_NO_MALLOC
EIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed() {
  eigen_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)");
}
#elif defined EIGEN_RUNTIME_NO_MALLOC
EIGEN_DEVICE_FUNC inline bool is_malloc_allowed_impl(bool update, bool new_value = false) {
  EIGEN_MALLOC_CHECK_THREAD_LOCAL static bool value = true;
  if (update) value = new_value;
  return value;
}
EIGEN_DEVICE_FUNC inline bool is_malloc_allowed() { return is_malloc_allowed_impl(false); }
EIGEN_DEVICE_FUNC inline bool set_is_malloc_allowed(bool new_value) { return is_malloc_allowed_impl(true, new_value); }
EIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed() {
  eigen_assert(is_malloc_allowed() &&
               "heap allocation is forbidden (EIGEN_RUNTIME_NO_MALLOC is defined and g_is_malloc_allowed is false)");
}
#else
EIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed() {}
#endif
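
// Usage sketch for the runtime switch above (only available when
// EIGEN_RUNTIME_NO_MALLOC is defined; do_work() is a hypothetical function):
//
//   Eigen::internal::set_is_malloc_allowed(false);
//   do_work();  // any Eigen heap allocation in here now triggers an eigen_assert
//   Eigen::internal::set_is_malloc_allowed(true);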

EIGEN_DEVICE_FUNC inline void throw_std_bad_alloc() {
#ifdef EIGEN_EXCEPTIONS
  throw std::bad_alloc();
#endif
}

/*****************************************************************************
*** Implementation of handmade aligned functions                           ***
*****************************************************************************/

/* ----- Hand made implementations of aligned malloc/free and realloc ----- */

/** \internal Like malloc, but the returned pointer is guaranteed to be aligned to `alignment`.
 * Fast, but wastes `alignment` additional bytes of memory. Does not throw any exception.
 */
EIGEN_DEVICE_FUNC inline void* handmade_aligned_malloc(std::size_t size,
                                                       std::size_t alignment = EIGEN_DEFAULT_ALIGN_BYTES) {}
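
// A minimal sketch of the over-allocate-and-shift scheme used by the handmade
// routines (illustrative only; example_handmade_malloc is a hypothetical name,
// not Eigen API). For the alignments used here (<= 128) the offset always fits
// in the byte stored just below the returned pointer:
//
//   void* example_handmade_malloc(std::size_t size, std::size_t alignment) {
//     void* original = std::malloc(size + alignment);  // the wasted bytes
//     if (original == nullptr) return nullptr;
//     std::uintptr_t raw = reinterpret_cast<std::uintptr_t>(original);
//     void* aligned = reinterpret_cast<void*>((raw + alignment) & ~(alignment - 1));
//     // the offset is in [1, alignment]:
//     static_cast<unsigned char*>(aligned)[-1] =
//         static_cast<unsigned char>(reinterpret_cast<std::uintptr_t>(aligned) - raw);
//     return aligned;
//   }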

/** \internal Frees memory allocated with handmade_aligned_malloc */
EIGEN_DEVICE_FUNC inline void handmade_aligned_free(void* ptr) {}
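
// Matching free sketch: read the stored offset back to recover the pointer
// originally returned by std::malloc (pairs with the hypothetical
// example_handmade_malloc above):
//
//   void example_handmade_free(void* aligned) {
//     if (aligned == nullptr) return;
//     unsigned char offset = static_cast<unsigned char*>(aligned)[-1];
//     std::free(static_cast<unsigned char*>(aligned) - offset);
//   }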

/** \internal
 * \brief Reallocates aligned memory.
 * Since we know that our handmade version is based on std::malloc
 * we can use std::realloc to implement efficient reallocation.
 */
EIGEN_DEVICE_FUNC inline void* handmade_aligned_realloc(void* ptr, std::size_t new_size, std::size_t old_size,
                                                        std::size_t alignment = EIGEN_DEFAULT_ALIGN_BYTES) {}

/** \internal Allocates \a size bytes. The returned pointer is guaranteed to have 16- or 32-byte alignment depending
 * on the requirements. On allocation error, the returned pointer is null, and a std::bad_alloc is thrown.
 */
EIGEN_DEVICE_FUNC inline void* aligned_malloc(std::size_t size) {}

/** \internal Frees memory allocated with aligned_malloc. */
EIGEN_DEVICE_FUNC inline void aligned_free(void* ptr) {}

/**
 * \internal
 * \brief Reallocates an aligned block of memory.
 * \throws std::bad_alloc on allocation failure
 */
EIGEN_DEVICE_FUNC inline void* aligned_realloc(void* ptr, std::size_t new_size, std::size_t old_size) {}

/*****************************************************************************
*** Implementation of conditionally aligned functions                      ***
*****************************************************************************/

/** \internal Allocates \a size bytes. If Align is true, then the returned ptr is 16-byte-aligned.
 * On allocation error, the returned pointer is null, and a std::bad_alloc is thrown.
 */
template <bool Align>
EIGEN_DEVICE_FUNC inline void* conditional_aligned_malloc(std::size_t size) {}

template <>
EIGEN_DEVICE_FUNC inline void* conditional_aligned_malloc<false>(std::size_t size) {}
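
// Typical use of the pair above (a sketch): the Align flag is a compile-time
// template parameter, so the unaligned specialization can presumably reduce to
// a plain malloc with no runtime branching:
//
//   double* buf = static_cast<double*>(
//       Eigen::internal::conditional_aligned_malloc<true>(n * sizeof(double)));
//   // ... use buf ...
//   Eigen::internal::conditional_aligned_free<true>(buf);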

/** \internal Frees memory allocated with conditional_aligned_malloc */
template <bool Align>
EIGEN_DEVICE_FUNC inline void conditional_aligned_free(void* ptr) {}

template <>
EIGEN_DEVICE_FUNC inline void conditional_aligned_free<false>(void* ptr) {}

template <bool Align>
EIGEN_DEVICE_FUNC inline void* conditional_aligned_realloc(void* ptr, std::size_t new_size, std::size_t old_size) {}

template <>
EIGEN_DEVICE_FUNC inline void* conditional_aligned_realloc<false>(void* ptr, std::size_t new_size,
                                                                  std::size_t old_size) {}

/*****************************************************************************
*** Construction/destruction of array elements                             ***
*****************************************************************************/

/** \internal Destructs the elements of an array.
 * The \a size parameter gives the number of objects on which to call the destructor of T.
 */
template <typename T>
EIGEN_DEVICE_FUNC inline void destruct_elements_of_array(T* ptr, std::size_t size) {}

/** \internal Constructs the elements of an array.
 * The \a size parameter gives the number of objects on which to call the constructor of T.
 */
template <typename T>
EIGEN_DEVICE_FUNC inline T* default_construct_elements_of_array(T* ptr, std::size_t size) {}
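
// A sketch of the exception-safety contract implied above: if a constructor
// throws, the elements constructed so far must be destroyed before rethrowing.
// Illustrative only; EIGEN_TRY/EIGEN_CATCH/EIGEN_THROW are Eigen's portable
// exception macros, and example_default_construct is a hypothetical name:
//
//   template <typename T>
//   T* example_default_construct(T* ptr, std::size_t size) {
//     std::size_t i = 0;
//     EIGEN_TRY {
//       for (; i < size; ++i) ::new (static_cast<void*>(ptr + i)) T;
//     }
//     EIGEN_CATCH(...) {
//       while (i > 0) ptr[--i].~T();  // unwind the partially constructed prefix
//       EIGEN_THROW;
//     }
//     return ptr;
//   }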

/** \internal Copy-constructs the elements of an array.
 * The \a size parameter gives the number of objects to copy.
 */
template <typename T>
EIGEN_DEVICE_FUNC inline T* copy_construct_elements_of_array(T* ptr, const T* src, std::size_t size) {}

/** \internal Move-constructs the elements of an array.
 * The \a size parameter gives the number of objects to move.
 */
template <typename T>
EIGEN_DEVICE_FUNC inline T* move_construct_elements_of_array(T* ptr, T* src, std::size_t size) {}

/*****************************************************************************
*** Implementation of aligned new/delete-like functions                    ***
*****************************************************************************/

template <typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void check_size_for_overflow(std::size_t size) {}
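
// The check amounts to verifying that size * sizeof(T) cannot overflow
// (a sketch; the exact bound used upstream may differ):
//
//   if (size > std::size_t((std::numeric_limits<std::ptrdiff_t>::max)()) / sizeof(T))
//     throw_std_bad_alloc();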

/** \internal Allocates \a size objects of type T. The returned pointer is guaranteed to have 16-byte alignment.
 * On allocation error, the returned pointer is undefined, but a std::bad_alloc is thrown.
 * The default constructor of T is called.
 */
template <typename T>
EIGEN_DEVICE_FUNC inline T* aligned_new(std::size_t size) {}

template <typename T, bool Align>
EIGEN_DEVICE_FUNC inline T* conditional_aligned_new(std::size_t size) {}

/** \internal Deletes objects constructed with aligned_new.
 * The \a size parameter gives the number of objects on which to call the destructor of T.
 */
template <typename T>
EIGEN_DEVICE_FUNC inline void aligned_delete(T* ptr, std::size_t size) {}

/** \internal Deletes objects constructed with conditional_aligned_new.
 * The \a size parameter gives the number of objects on which to call the destructor of T.
 */
template <typename T, bool Align>
EIGEN_DEVICE_FUNC inline void conditional_aligned_delete(T* ptr, std::size_t size) {}

template <typename T, bool Align>
EIGEN_DEVICE_FUNC inline T* conditional_aligned_realloc_new(T* pts, std::size_t new_size, std::size_t old_size) {}

template <typename T, bool Align>
EIGEN_DEVICE_FUNC inline T* conditional_aligned_new_auto(std::size_t size) {}

template <typename T, bool Align>
EIGEN_DEVICE_FUNC inline T* conditional_aligned_realloc_new_auto(T* pts, std::size_t new_size, std::size_t old_size) {}

template <typename T, bool Align>
EIGEN_DEVICE_FUNC inline void conditional_aligned_delete_auto(T* ptr, std::size_t size) {}

/****************************************************************************/

/** \internal Returns the index of the first element of the array that is well aligned with respect to the requested \a
 * Alignment.
 *
 * \tparam Alignment requested alignment in Bytes.
 * \param array the address of the start of the array
 * \param size the size of the array
 *
 * \note If no element of the array is well aligned or the requested alignment is not a multiple of the scalar
 * size, the size of the array is returned. For example with SSE, the requested alignment is typically 16 bytes. If
 * the packet size for the given scalar type is 1, then everything is considered well-aligned.
 *
 * \note Otherwise, if the Alignment is larger than the scalar size, we rely on the assumption that sizeof(Scalar) is a
 * power of 2. On the other hand, we do not assume that the array address is a multiple of sizeof(Scalar), as that fails
 * for example with Scalar=double on certain 32-bit platforms, see bug #79.
 *
 * There is also the variant first_aligned(const MatrixBase&) defined in DenseCoeffsBase.h.
 * \sa first_default_aligned()
 */
template <int Alignment, typename Scalar, typename Index>
EIGEN_DEVICE_FUNC inline Index first_aligned(const Scalar* array, Index size) {}
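
// Sketch of the index computation (illustrative, not the verbatim
// implementation): since sizeof(Scalar) is a power of two but the array address
// need not be a multiple of it, first check that the misalignment is
// representable in whole elements:
//
//   std::uintptr_t misalign = reinterpret_cast<std::uintptr_t>(array) & (Alignment - 1);
//   if (misalign % sizeof(Scalar) != 0) return size;  // no element will ever be aligned
//   Index first = Index(((Alignment - misalign) & (Alignment - 1)) / sizeof(Scalar));
//   return numext::mini(first, size);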

/** \internal Returns the index of the first element of the array that is well aligned with respect to the largest
 * packet requirement. \sa first_aligned(Scalar*,Index) and first_default_aligned(DenseBase<Derived>) */
template <typename Scalar, typename Index>
EIGEN_DEVICE_FUNC inline Index first_default_aligned(const Scalar* array, Index size) {}

/** \internal Returns the smallest integer multiple of \a base that is greater than or equal to \a size
 */
template <typename Index>
inline Index first_multiple(Index size, Index base) {
  return ((size + base - 1) / base) * base;
}

// std::copy is much slower than memcpy, so let's introduce a smart_copy which
// uses memcpy on trivial types, i.e., on types that do not require an initialization ctor.
template <typename T, bool UseMemcpy>
struct smart_copy_helper;

template <typename T>
EIGEN_DEVICE_FUNC void smart_copy(const T* start, const T* end, T* target) {}

template <typename T>
struct smart_copy_helper<T, true> {};

template <typename T>
struct smart_copy_helper<T, false> {};

// Intelligent memmove: falls back to std::memmove for POD types, uses std::copy otherwise.
template <typename T, bool UseMemmove>
struct smart_memmove_helper;

template <typename T>
void smart_memmove(const T* start, const T* end, T* target) {}

template <typename T>
struct smart_memmove_helper<T, true> {};

template <typename T>
struct smart_memmove_helper<T, false> {};

template <typename T>
EIGEN_DEVICE_FUNC T* smart_move(T* start, T* end, T* target) {}
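
// Dispatch sketch for the helpers above (illustrative): trivial types take the
// memcpy/memmove fast path, everything else an element-wise loop. A static
// run(start, end, target) entry point in the helpers is assumed here, and
// upstream keys the branch on Eigen's NumTraits<T>::RequireInitialization;
// std::is_trivially_copyable is shown merely as an approximation:
//
//   template <typename T>
//   void example_smart_copy(const T* start, const T* end, T* target) {
//     smart_copy_helper<T, std::is_trivially_copyable<T>::value>::run(start, end, target);
//   }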

/*****************************************************************************
*** Implementation of runtime stack allocation (falling back to malloc)    ***
*****************************************************************************/

// You can override Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA
// to the appropriate stack allocation function.
#if !defined EIGEN_ALLOCA && !defined EIGEN_GPU_COMPILE_PHASE
#if EIGEN_OS_LINUX || EIGEN_OS_MAC || (defined alloca)
#define EIGEN_ALLOCA alloca
#elif EIGEN_COMP_MSVC
#define EIGEN_ALLOCA _alloca
#endif
#endif

// With clang -Oz -mthumb, alloca changes the stack pointer in a way that is
// not allowed in Thumb2. -DEIGEN_STACK_ALLOCATION_LIMIT=0 doesn't work because
// the compiler still emits bad code because stack allocation checks use "<=".
// TODO: Eliminate after https://bugs.llvm.org/show_bug.cgi?id=23772
// is fixed.
#if defined(__clang__) && defined(__thumb__)
#undef EIGEN_ALLOCA
#endif

// This helper class constructs the elements in the allocated memory, and takes care of destructing and freeing the
// handled data at destruction time. In practice this helper class is mainly useful to avoid memory leaks in case of
// exceptions.
template <typename T>
class aligned_stack_memory_handler : noncopyable {};

#ifdef EIGEN_ALLOCA

template <typename Xpr, int NbEvaluations,
          bool MapExternalBuffer = nested_eval<Xpr, NbEvaluations>::Evaluate && Xpr::MaxSizeAtCompileTime == Dynamic>
struct local_nested_eval_wrapper {};

template <typename Xpr, int NbEvaluations>
struct local_nested_eval_wrapper<Xpr, NbEvaluations, true> {};

#endif  // EIGEN_ALLOCA

template <typename T>
class scoped_array : noncopyable {};

template <typename T>
void swap(scoped_array<T>& a, scoped_array<T>& b) {}

}  // end namespace internal

/** \internal
 *
 * The macro ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) declares, allocates,
 * and constructs an aligned buffer named NAME of SIZE elements of type TYPE on the stack
 * if the size in bytes is smaller than EIGEN_STACK_ALLOCATION_LIMIT, and if stack allocation is supported by the
 * platform (currently, this is Linux, OSX and Visual Studio only). Otherwise the memory is allocated on the heap. The
 * allocated buffer is automatically deleted when exiting the scope of this declaration. If BUFFER is non-null, then
 * the declared variable is simply an alias for BUFFER, and no allocation/deletion occurs. Here is an example: \code
 * {
 *   ei_declare_aligned_stack_constructed_variable(float,data,size,0);
 *   // use data[0] to data[size-1]
 * }
 * \endcode
 * The underlying stack allocation function can be controlled with the EIGEN_ALLOCA preprocessor token.
 *
 * The macro ei_declare_local_nested_eval(XPR_T,XPR,N,NAME) is analogous to
 * \code
 *   typename internal::nested_eval<XPR_T,N>::type NAME(XPR);
 * \endcode
 * with the advantage of using aligned stack allocation even if the maximal size of XPR at compile time is unknown.
 * This is accomplished through alloca if the latter is supported and if the required number of bytes
 * is below EIGEN_STACK_ALLOCATION_LIMIT.
 */
#ifdef EIGEN_ALLOCA

#if EIGEN_DEFAULT_ALIGN_BYTES > 0
// We always manually re-align the result of EIGEN_ALLOCA.
// If alloca is already aligned, the compiler should be smart enough to optimize away the re-alignment.

#if (EIGEN_COMP_GNUC || EIGEN_COMP_CLANG)
#define EIGEN_ALIGNED_ALLOCA(SIZE) __builtin_alloca_with_align(SIZE, CHAR_BIT * EIGEN_DEFAULT_ALIGN_BYTES)
#else
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void* eigen_aligned_alloca_helper(void* ptr) {
  constexpr std::uintptr_t mask = EIGEN_DEFAULT_ALIGN_BYTES - 1;
  std::uintptr_t ptr_int = std::uintptr_t(ptr);
  std::uintptr_t aligned_ptr_int = (ptr_int + mask) & ~mask;
  std::uintptr_t offset = aligned_ptr_int - ptr_int;
  return static_cast<void*>(static_cast<uint8_t*>(ptr) + offset);
}
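
// Worked example: with EIGEN_DEFAULT_ALIGN_BYTES == 16 the mask is 0xF, so
// ptr == 0x1007 yields aligned_ptr_int == (0x1007 + 0xF) & ~0xF == 0x1010,
// i.e. an offset of 9 bytes.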
#define EIGEN_ALIGNED_ALLOCA(SIZE) eigen_aligned_alloca_helper(EIGEN_ALLOCA(SIZE + EIGEN_DEFAULT_ALIGN_BYTES - 1))
#endif

#else
#define EIGEN_ALIGNED_ALLOCA(SIZE) EIGEN_ALLOCA(SIZE)
#endif

#define ei_declare_aligned_stack_constructed_variable(TYPE, NAME, SIZE, BUFFER)

#define ei_declare_local_nested_eval(XPR_T, XPR, N, NAME)

#else

#define ei_declare_aligned_stack_constructed_variable(TYPE, NAME, SIZE, BUFFER)

#define ei_declare_local_nested_eval(XPR_T, XPR, N, NAME)

#endif

/*****************************************************************************
*** Implementation of EIGEN_MAKE_ALIGNED_OPERATOR_NEW [_IF]                ***
*****************************************************************************/

#if EIGEN_HAS_CXX17_OVERALIGN

// C++17 -> no need to bother about alignment anymore :)

#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign)
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar, Size)

#else

// HIP does not support new/delete on device.
#if EIGEN_MAX_ALIGN_BYTES != 0 && !defined(EIGEN_HIP_DEVICE_COMPILE)
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign)
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
#else
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
#endif

#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(true)
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar, Size)

#endif

/****************************************************************************/

/** \class aligned_allocator
 * \ingroup Core_Module
 *
 * \brief STL compatible allocator to use with types requiring a non-standard alignment.
 *
 * The memory is aligned as for dynamically aligned matrix/array types such as MatrixXd.
 * By default, it will thus provide at least 16-byte alignment, and more in the following cases:
 *  - 32 bytes alignment if AVX is enabled.
 *  - 64 bytes alignment if AVX512 is enabled.
 *
 * This can be controlled using the \c EIGEN_MAX_ALIGN_BYTES macro as documented
 * \link TopicPreprocessorDirectivesPerformance there \endlink.
 *
 * Example:
 * \code
 * // Matrix4f requires 16 bytes alignment:
 * std::map< int, Matrix4f, std::less<int>,
 *           aligned_allocator<std::pair<const int, Matrix4f> > > my_map_mat4;
 * // Vector3f does not require 16-byte alignment, so there is no need to use Eigen's allocator:
 * std::map< int, Vector3f > my_map_vec3;
 * \endcode
 *
 * \sa \blank \ref TopicStlContainers.
 */
template <class T>
class aligned_allocator : public std::allocator<T> {};
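
// Beyond the std::map example above, any std::vector of fixed-size
// vectorizable Eigen types needs this allocator to preserve the alignment
// guarantee (a sketch):
//
//   std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > v(100);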

//---------- Cache sizes ----------

#if !defined(EIGEN_NO_CPUID)
#if EIGEN_COMP_GNUC && EIGEN_ARCH_i386_OR_x86_64
#if defined(__PIC__) && EIGEN_ARCH_i386
// Case for x86 with PIC
#define EIGEN_CPUID(abcd, func, id)
#elif defined(__PIC__) && EIGEN_ARCH_x86_64
// Case for x64 with PIC. In theory this is only a problem with recent gcc and with medium or large code model, not with
// the default small code model. However, we cannot detect which code model is used, and the xchg overhead is negligible
// anyway.
#define EIGEN_CPUID(abcd, func, id)
#else
// Case for x86_64 or x86 w/o PIC
#define EIGEN_CPUID(abcd, func, id)
#endif
#elif EIGEN_COMP_MSVC
#if EIGEN_ARCH_i386_OR_x86_64
#define EIGEN_CPUID(abcd, func, id)
#endif
#endif
#endif

namespace internal {

#ifdef EIGEN_CPUID

inline bool cpuid_is_vendor(int abcd[4], const int vendor[3]) {}

inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3) {}

inline void queryCacheSizes_intel_codes(int& l1, int& l2, int& l3) {}

inline void queryCacheSizes_intel(int& l1, int& l2, int& l3, int max_std_funcs) {}

inline void queryCacheSizes_amd(int& l1, int& l2, int& l3) {}
#endif

/** \internal
 * Queries and returns the cache sizes in Bytes of the L1, L2, and L3 data caches respectively */
inline void queryCacheSizes(int& l1, int& l2, int& l3) {}
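
// Usage sketch:
//
//   int l1, l2, l3;
//   Eigen::internal::queryCacheSizes(l1, l2, l3);  // sizes of the L1/L2/L3 data caches in Bytes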

/** \internal
 * \returns the size in Bytes of the L1 data cache */
inline int queryL1CacheSize() {}

/** \internal
 * \returns the size in Bytes of the L2 or L3 cache if the latter is present */
inline int queryTopLevelCacheSize() {}

/** \internal
 * This wraps C++20's std::construct_at, using placement new instead if it is not available.
 */

#if EIGEN_COMP_CXXVER >= 20
using std::construct_at;
#else
template <class T, class... Args>
EIGEN_DEVICE_FUNC T* construct_at(T* p, Args&&... args) {
  return ::new (const_cast<void*>(static_cast<const volatile void*>(p))) T(std::forward<Args>(args)...);
}
#endif

/** \internal
 * This wraps C++17's std::destroy_at.  If it's not available it calls the destructor.
 * The wrapper is not a full replacement for C++20's std::destroy_at as it cannot
 * be applied to std::array.
 */
#if EIGEN_COMP_CXXVER >= 17
using std::destroy_at;
#else
template <class T>
EIGEN_DEVICE_FUNC void destroy_at(T* p) {
  p->~T();
}
#endif
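
// Usage sketch for the two wrappers above: end the lifetime of an object and
// start a new one in the same storage (p is a hypothetical T*, args a matching
// constructor argument list):
//
//   Eigen::internal::destroy_at(p);          // ~T(), or std::destroy_at from C++17
//   Eigen::internal::construct_at(p, args);  // placement new, or std::construct_at from C++20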

}  // end namespace internal

}  // end namespace Eigen

#endif  // EIGEN_MEMORY_H