//===-- Memory utils --------------------------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #ifndef LLVM_LIBC_SRC_STRING_MEMORY_UTILS_UTILS_H #define LLVM_LIBC_SRC_STRING_MEMORY_UTILS_UTILS_H #include "src/__support/CPP/bit.h" #include "src/__support/CPP/cstddef.h" #include "src/__support/CPP/type_traits.h" #include "src/__support/endian.h" #include "src/__support/macros/attributes.h" // LIBC_INLINE #include "src/__support/macros/config.h" #include "src/__support/macros/properties/architectures.h" #include <stddef.h> // size_t #include <stdint.h> // intptr_t / uintptr_t / INT32_MAX / INT32_MIN namespace LIBC_NAMESPACE_DECL { // Returns the number of bytes to subtract from ptr to get to the previous // multiple of alignment. If ptr is already aligned returns 0. template <size_t alignment> LIBC_INLINE uintptr_t distance_to_align_down(const void *ptr) { … } // Returns the number of bytes to add to ptr to get to the next multiple of // alignment. If ptr is already aligned returns 0. template <size_t alignment> LIBC_INLINE uintptr_t distance_to_align_up(const void *ptr) { … } // Returns the number of bytes to add to ptr to get to the next multiple of // alignment. If ptr is already aligned returns alignment. template <size_t alignment> LIBC_INLINE uintptr_t distance_to_next_aligned(const void *ptr) { … } // Returns the same pointer but notifies the compiler that it is aligned. template <size_t alignment, typename T> LIBC_INLINE T *assume_aligned(T *ptr) { … } // Returns true iff memory regions [p1, p1 + size] and [p2, p2 + size] are // disjoint. 
LIBC_INLINE bool is_disjoint(const void *p1, const void *p2, size_t size) { … } #if __has_builtin(__builtin_memcpy_inline) #define LLVM_LIBC_HAS_BUILTIN_MEMCPY_INLINE #endif #if __has_builtin(__builtin_memset_inline) #define LLVM_LIBC_HAS_BUILTIN_MEMSET_INLINE #endif // Performs a constant count copy. template <size_t Size> LIBC_INLINE void memcpy_inline(void *__restrict dst, const void *__restrict src) { … } Ptr; // Pointer to raw data. CPtr; // Const pointer to raw data. // This type makes sure that we don't accidentally promote an integral type to // another one. It is only constructible from the exact T type. template <typename T> struct StrictIntegralType { … }; MemcmpReturnType; BcmpReturnType; // This implements the semantic of 'memcmp' returning a negative value when 'a' // is less than 'b', '0' when 'a' equals 'b' and a positive number otherwise. LIBC_INLINE MemcmpReturnType cmp_uint32_t(uint32_t a, uint32_t b) { … } // Returns a negative value if 'a' is less than 'b' and a positive value // otherwise. This implements the semantic of 'memcmp' when we know that 'a' and // 'b' differ. LIBC_INLINE MemcmpReturnType cmp_neq_uint64_t(uint64_t a, uint64_t b) { … } // Loads bytes from memory (possibly unaligned) and materializes them as // type. template <typename T> LIBC_INLINE T load(CPtr ptr) { … } // Stores a value of type T in memory (possibly unaligned). template <typename T> LIBC_INLINE void store(Ptr ptr, T value) { … } // On architectures that do not allow for unaligned access we perform several // aligned accesses and recombine them through shifts and logical operations. // For instance, if we know that the pointer is 2-byte aligned we can decompose // a 64-bit operation into four 16-bit operations. // Loads a 'ValueType' by decomposing it into several loads that are assumed to // be aligned. // e.g. load_aligned<uint32_t, uint16_t, uint16_t>(ptr); template <typename ValueType, typename T, typename... 
TS> LIBC_INLINE ValueType load_aligned(CPtr src) { … } // Alias for loading a 'uint32_t'. template <typename T, typename... TS> LIBC_INLINE auto load32_aligned(CPtr src, size_t offset) { … } // Alias for loading a 'uint64_t'. template <typename T, typename... TS> LIBC_INLINE auto load64_aligned(CPtr src, size_t offset) { … } // Stores a 'ValueType' by decomposing it into several stores that are assumed // to be aligned. // e.g. store_aligned<uint32_t, uint16_t, uint16_t>(value, ptr); template <typename ValueType, typename T, typename... TS> LIBC_INLINE void store_aligned(ValueType value, Ptr dst) { … } // Alias for storing a 'uint32_t'. template <typename T, typename... TS> LIBC_INLINE void store32_aligned(uint32_t value, Ptr dst, size_t offset) { … } // Alias for storing a 'uint64_t'. template <typename T, typename... TS> LIBC_INLINE void store64_aligned(uint64_t value, Ptr dst, size_t offset) { … } // Advances the pointers p1 and p2 by offset bytes and decreases count by the // same amount. template <typename T1, typename T2> LIBC_INLINE void adjust(ptrdiff_t offset, T1 *__restrict &p1, T2 *__restrict &p2, size_t &count) { … } // Advances p1 and p2 so p1 gets aligned to the next SIZE bytes boundary // and decreases count by the same amount. // We make sure the compiler knows about the adjusted pointer alignment. template <size_t SIZE, typename T1, typename T2> void align_p1_to_next_boundary(T1 *__restrict &p1, T2 *__restrict &p2, size_t &count) { … } // Same as align_p1_to_next_boundary above but with a single pointer instead. template <size_t SIZE, typename T> LIBC_INLINE void align_to_next_boundary(T *&p1, size_t &count) { … } // An enum class that discriminates between the first and second pointer. enum class Arg { … }; // Same as align_p1_to_next_boundary but allows for aligning p2 instead of p1. 
// Precondition: &p1 != &p2 template <size_t SIZE, Arg AlignOn, typename T1, typename T2> LIBC_INLINE void align_to_next_boundary(T1 *__restrict &p1, T2 *__restrict &p2, size_t &count) { … } template <size_t SIZE> struct AlignHelper { … }; LIBC_INLINE void prefetch_for_write(CPtr dst) { … } LIBC_INLINE void prefetch_to_local_cache(CPtr dst) { … } } // namespace LIBC_NAMESPACE_DECL #endif // LLVM_LIBC_SRC_STRING_MEMORY_UTILS_UTILS_H