#include "llvm/Support/xxhash.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Endian.h"
#include <stdlib.h>
#if !defined(LLVM_XXH_USE_NEON)
#if (defined(__aarch64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) && \
!defined(__ARM_BIG_ENDIAN)
#define LLVM_XXH_USE_NEON …
#else
#define LLVM_XXH_USE_NEON …
#endif
#endif
#if LLVM_XXH_USE_NEON
#include <arm_neon.h>
#endif
using namespace llvm;
using namespace support;
static uint64_t rotl64(uint64_t X, size_t R) { … }
constexpr uint32_t PRIME32_1 = …;
constexpr uint32_t PRIME32_2 = …;
constexpr uint32_t PRIME32_3 = …;
static const uint64_t PRIME64_1 = …;
static const uint64_t PRIME64_2 = …;
static const uint64_t PRIME64_3 = …;
static const uint64_t PRIME64_4 = …;
static const uint64_t PRIME64_5 = …;
static uint64_t round(uint64_t Acc, uint64_t Input) { … }
static uint64_t mergeRound(uint64_t Acc, uint64_t Val) { … }
static uint64_t XXH64_avalanche(uint64_t hash) { … }
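// Public XXH64 entry points. Both overloads hash the raw bytes of Data.
// Illustrative use only (Buf/Size are placeholder names, not part of this
// file), assuming the declarations in llvm/Support/xxhash.h:
//   uint64_t H = llvm::xxHash64(StringRef("hello"));
//   uint64_t G = llvm::xxHash64(ArrayRef<uint8_t>(Buf, Size));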
uint64_t llvm::xxHash64(StringRef Data) { … }
uint64_t llvm::xxHash64(ArrayRef<uint8_t> Data) { … }
constexpr size_t XXH3_SECRETSIZE_MIN = …;
constexpr size_t XXH_SECRET_DEFAULT_SIZE = …;
constexpr uint8_t kSecret[XXH_SECRET_DEFAULT_SIZE] = …;
constexpr uint64_t PRIME_MX1 = …;
constexpr uint64_t PRIME_MX2 = …;
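// In the reference xxHash implementation, mul128_fold64 forms the full
// 128-bit product of lhs and rhs and folds it to 64 bits by XORing the high
// and low halves; the elided body here is assumed to match that behavior.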
static uint64_t XXH3_mul128_fold64(uint64_t lhs, uint64_t rhs) { … }
constexpr size_t XXH_STRIPE_LEN = …;
constexpr size_t XXH_SECRET_CONSUME_RATE = …;
constexpr size_t XXH_ACC_NB = …;
static uint64_t XXH3_avalanche(uint64_t hash) { … }
static uint64_t XXH3_len_1to3_64b(const uint8_t *input, size_t len,
const uint8_t *secret, uint64_t seed) { … }
static uint64_t XXH3_len_4to8_64b(const uint8_t *input, size_t len,
const uint8_t *secret, uint64_t seed) { … }
static uint64_t XXH3_len_9to16_64b(const uint8_t *input, size_t len,
const uint8_t *secret, uint64_t const seed) { … }
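// Presumably dispatches to the 1-3/4-8/9-16 byte helpers above based on len,
// mirroring the reference XXH3 short-input path (body elided).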
LLVM_ATTRIBUTE_ALWAYS_INLINE
static uint64_t XXH3_len_0to16_64b(const uint8_t *input, size_t len,
const uint8_t *secret, uint64_t const seed) { … }
static uint64_t XXH3_mix16B(const uint8_t *input, uint8_t const *secret,
uint64_t seed) { … }
LLVM_ATTRIBUTE_ALWAYS_INLINE
static uint64_t XXH3_len_17to128_64b(const uint8_t *input, size_t len,
const uint8_t *secret,
uint64_t const seed) { … }
constexpr size_t XXH3_MIDSIZE_MAX = …;
constexpr size_t XXH3_MIDSIZE_STARTOFFSET = …;
constexpr size_t XXH3_MIDSIZE_LASTOFFSET = …;
LLVM_ATTRIBUTE_NOINLINE
static uint64_t XXH3_len_129to240_64b(const uint8_t *input, size_t len,
const uint8_t *secret, uint64_t seed) { … }
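// Long-input kernels: the 512-bit accumulate and scramble steps have a NEON
// variant for little-endian AArch64 and a scalar fallback, selected through
// the LLVM_XXH_USE_NEON macro defined at the top of this file.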
#if LLVM_XXH_USE_NEON
#define XXH3_accumulate_512 …
#define XXH3_scrambleAcc …
#if defined(__GNUC__) || defined(__clang__)
#define XXH_ALIASING …
#else
#define XXH_ALIASING …
#endif
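// Vector type used to read the uint64_t accumulator array through NEON
// registers; XXH_ALIASING (typically a may_alias attribute on GCC/Clang,
// value elided above) keeps that type-punning legal under strict aliasing.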
typedef uint64x2_t xxh_aliasing_uint64x2_t XXH_ALIASING;
// Unaligned 128-bit load: read 16 bytes and reinterpret them as two u64
// lanes, avoiding any alignment assumption on ptr.
LLVM_ATTRIBUTE_ALWAYS_INLINE static uint64x2_t XXH_vld1q_u64(void const *ptr) {
  return vreinterpretq_u64_u8(vld1q_u8((uint8_t const *)ptr));
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
static void XXH3_accumulate_512_neon(uint64_t *acc, const uint8_t *input,
                                     const uint8_t *secret) {
  xxh_aliasing_uint64x2_t *const xacc = (xxh_aliasing_uint64x2_t *)acc;

  // Process the stripe as pairs of uint64x2_t vectors, i.e. four 64-bit
  // accumulator lanes per iteration.
#ifdef __clang__
#pragma clang loop unroll(full)
#endif
  for (size_t i = 0; i < XXH_ACC_NB / 2; i += 2) {
    // data_vec = input[i]; key_vec = secret[i];
    uint64x2_t data_vec_1 = XXH_vld1q_u64(input + (i * 16));
    uint64x2_t data_vec_2 = XXH_vld1q_u64(input + ((i + 1) * 16));
    uint64x2_t key_vec_1 = XXH_vld1q_u64(secret + (i * 16));
    uint64x2_t key_vec_2 = XXH_vld1q_u64(secret + ((i + 1) * 16));
    // data_swap = data_vec with its two 64-bit halves exchanged.
    uint64x2_t data_swap_1 = vextq_u64(data_vec_1, data_vec_1, 1);
    uint64x2_t data_swap_2 = vextq_u64(data_vec_2, data_vec_2, 1);
    // data_key = data_vec ^ key_vec
    uint64x2_t data_key_1 = veorq_u64(data_vec_1, key_vec_1);
    uint64x2_t data_key_2 = veorq_u64(data_vec_2, key_vec_2);
    // Deinterleave: data_key_lo holds the low 32 bits of every 64-bit lane,
    // data_key_hi the high 32 bits.
    uint32x4x2_t unzipped = vuzpq_u32(vreinterpretq_u32_u64(data_key_1),
                                      vreinterpretq_u32_u64(data_key_2));
    uint32x4_t data_key_lo = unzipped.val[0];
    uint32x4_t data_key_hi = unzipped.val[1];
    // sum = data_swap + (u64)lo(data_key) * (u64)hi(data_key)
    uint64x2_t sum_1 = vmlal_u32(data_swap_1, vget_low_u32(data_key_lo),
                                 vget_low_u32(data_key_hi));
    uint64x2_t sum_2 = vmlal_u32(data_swap_2, vget_high_u32(data_key_lo),
                                 vget_high_u32(data_key_hi));
    // acc += sum
    xacc[i] = vaddq_u64(xacc[i], sum_1);
    xacc[i + 1] = vaddq_u64(xacc[i + 1], sum_2);
  }
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
static void XXH3_scrambleAcc_neon(uint64_t *acc, const uint8_t *secret) {
  xxh_aliasing_uint64x2_t *const xacc = (xxh_aliasing_uint64x2_t *)acc;

  // { PRIME32_1, PRIME32_1 }
  uint32x2_t const kPrimeLo = vdup_n_u32(PRIME32_1);
  // { 0, PRIME32_1, 0, PRIME32_1 }: PRIME32_1 in the high 32 bits of each
  // 64-bit lane.
  uint32x4_t const kPrimeHi =
      vreinterpretq_u32_u64(vdupq_n_u64((uint64_t)PRIME32_1 << 32));

  for (size_t i = 0; i < XXH_ACC_NB / 2; ++i) {
    // data_vec = acc ^ (acc >> 47)   (xorshift)
    uint64x2_t acc_vec = XXH_vld1q_u64(acc + (2 * i));
    uint64x2_t shifted = vshrq_n_u64(acc_vec, 47);
    uint64x2_t data_vec = veorq_u64(acc_vec, shifted);
    // data_key = data_vec ^ secret[i]
    uint64x2_t key_vec = XXH_vld1q_u64(secret + (i * 16));
    uint64x2_t data_key = veorq_u64(data_vec, key_vec);
    // acc = data_key * PRIME32_1, computed as
    //   (hi(data_key) * PRIME32_1 mod 2^32) << 32    (prod_hi)
    // + lo(data_key) * PRIME32_1                     (widening mla)
    uint32x4_t prod_hi = vmulq_u32(vreinterpretq_u32_u64(data_key), kPrimeHi);
    uint32x2_t data_key_lo = vmovn_u64(data_key);
    xacc[i] = vmlal_u32(vreinterpretq_u64_u32(prod_hi), data_key_lo, kPrimeLo);
  }
}
#else
#define XXH3_accumulate_512 …
#define XXH3_scrambleAcc …
LLVM_ATTRIBUTE_ALWAYS_INLINE
static void XXH3_accumulate_512_scalar(uint64_t *acc, const uint8_t *input,
const uint8_t *secret) { … }
LLVM_ATTRIBUTE_ALWAYS_INLINE
static void XXH3_scrambleAcc_scalar(uint64_t *acc, const uint8_t *secret) { … }
#endif
LLVM_ATTRIBUTE_ALWAYS_INLINE
static void XXH3_accumulate(uint64_t *acc, const uint8_t *input,
const uint8_t *secret, size_t nbStripes) { … }
static uint64_t XXH3_mix2Accs(const uint64_t *acc, const uint8_t *secret) { … }
static uint64_t XXH3_mergeAccs(const uint64_t *acc, const uint8_t *key,
uint64_t start) { … }
LLVM_ATTRIBUTE_NOINLINE
static uint64_t XXH3_hashLong_64b(const uint8_t *input, size_t len,
const uint8_t *secret, size_t secretSize) { … }
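// Public XXH3-64 entry point. Illustrative use only (Buf/Size are placeholder
// names), assuming the declaration in llvm/Support/xxhash.h:
//   uint64_t H = llvm::xxh3_64bits(ArrayRef<uint8_t>(Buf, Size));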
uint64_t llvm::xxh3_64bits(ArrayRef<uint8_t> data) { … }
#if __has_builtin(__builtin_rotateleft32) && \
__has_builtin(__builtin_rotateleft64)
#define XXH_rotl32 …
#define XXH_rotl64 …
#elif defined(_MSC_VER)
#define XXH_rotl32 …
#define XXH_rotl64 …
#else
#define XXH_rotl32 …
#define XXH_rotl64 …
#endif
#define XXH_mult32to64(x, y) …
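// Per the reference implementation, mult64to128 is expected to return the
// full 128-bit product of lhs and rhs as an XXH128_hash_t (low64/high64).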
static XXH128_hash_t XXH_mult64to128(uint64_t lhs, uint64_t rhs) { … }
LLVM_ATTRIBUTE_ALWAYS_INLINE constexpr uint64_t XXH_xorshift64(uint64_t v64,
int shift) { … }
LLVM_ATTRIBUTE_ALWAYS_INLINE static XXH128_hash_t
XXH3_len_1to3_128b(const uint8_t *input, size_t len, const uint8_t *secret,
uint64_t seed) { … }
LLVM_ATTRIBUTE_ALWAYS_INLINE static XXH128_hash_t
XXH3_len_4to8_128b(const uint8_t *input, size_t len, const uint8_t *secret,
uint64_t seed) { … }
LLVM_ATTRIBUTE_ALWAYS_INLINE static XXH128_hash_t
XXH3_len_9to16_128b(const uint8_t *input, size_t len, const uint8_t *secret,
uint64_t seed) { … }
LLVM_ATTRIBUTE_ALWAYS_INLINE static XXH128_hash_t
XXH3_len_0to16_128b(const uint8_t *input, size_t len, const uint8_t *secret,
uint64_t seed) { … }
LLVM_ATTRIBUTE_ALWAYS_INLINE static XXH128_hash_t
XXH128_mix32B(XXH128_hash_t acc, const uint8_t *input_1, const uint8_t *input_2,
const uint8_t *secret, uint64_t seed) { … }
LLVM_ATTRIBUTE_ALWAYS_INLINE static XXH128_hash_t
XXH3_len_17to128_128b(const uint8_t *input, size_t len, const uint8_t *secret,
size_t secretSize, uint64_t seed) { … }
LLVM_ATTRIBUTE_NOINLINE static XXH128_hash_t
XXH3_len_129to240_128b(const uint8_t *input, size_t len, const uint8_t *secret,
size_t secretSize, uint64_t seed) { … }
LLVM_ATTRIBUTE_ALWAYS_INLINE XXH128_hash_t
XXH3_hashLong_128b(const uint8_t *input, size_t len, const uint8_t *secret,
size_t secretSize) { … }
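// Public XXH3-128 entry point. Illustrative use only (Bytes is a placeholder
// ArrayRef<uint8_t>), assuming XXH128_hash_t's low64/high64 fields from
// llvm/Support/xxhash.h:
//   llvm::XXH128_hash_t H = llvm::xxh3_128bits(Bytes);
//   uint64_t Lo = H.low64, Hi = H.high64;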
llvm::XXH128_hash_t llvm::xxh3_128bits(ArrayRef<uint8_t> data) { … }