#include "adler32_simd.h"
#define BASE …
#define NMAX …
#if defined(ADLER32_SIMD_SSSE3)
#include <tmmintrin.h>

uint32_t ZLIB_INTERNAL adler32_simd_(
    uint32_t adler,
    const unsigned char *buf,
    z_size_t len)
{ … }
#elif defined(ADLER32_SIMD_NEON)
#include <arm_neon.h>
uint32_t ZLIB_INTERNAL adler32_simd_(
    uint32_t adler,
    const unsigned char *buf,
    z_size_t len)
{
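    /*
     * Split Adler-32 into component sums.
     */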
    uint32_t s1 = adler & 0xffff;
    uint32_t s2 = adler >> 16;
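    /*
     * Serially compute s1 & s2 until the buffer is 16-byte aligned.
     */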
    if ((uintptr_t)buf & 15) {
        while ((uintptr_t)buf & 15) {
            s2 += (s1 += *buf++);
            --len;
        }

        if (s1 >= BASE)
            s1 -= BASE;
        s2 %= BASE;
    }
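    /*
     * Process the data in 32-byte blocks.
     */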
    const unsigned BLOCK_SIZE = 1 << 5;

    z_size_t blocks = len / BLOCK_SIZE;
    len -= blocks * BLOCK_SIZE;
    while (blocks)
    {
        unsigned n = NMAX / BLOCK_SIZE;
        if (n > blocks)
            n = (unsigned) blocks;
        blocks -= n;
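        /*
         * Process n blocks of data. At most NMAX data bytes can be
         * processed before s2 must be reduced modulo BASE.
         */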
        uint32x4_t v_s2 = (uint32x4_t) { 0, 0, 0, s1 * n };
        uint32x4_t v_s1 = (uint32x4_t) { 0, 0, 0, 0 };

        uint16x8_t v_column_sum_1 = vdupq_n_u16(0);
        uint16x8_t v_column_sum_2 = vdupq_n_u16(0);
        uint16x8_t v_column_sum_3 = vdupq_n_u16(0);
        uint16x8_t v_column_sum_4 = vdupq_n_u16(0);
        do {
            /* Load 32 input bytes. */
            const uint8x16_t bytes1 = vld1q_u8((uint8_t*)(buf));
            const uint8x16_t bytes2 = vld1q_u8((uint8_t*)(buf + 16));

            /* Add previous block byte sum to v_s2. */
            v_s2 = vaddq_u32(v_s2, v_s1);

            /* Horizontally add the bytes for s1. */
            v_s1 = vpadalq_u16(v_s1, vpadalq_u8(vpaddlq_u8(bytes1), bytes2));

            /* Vertically add the bytes for s2. */
            v_column_sum_1 = vaddw_u8(v_column_sum_1, vget_low_u8 (bytes1));
            v_column_sum_2 = vaddw_u8(v_column_sum_2, vget_high_u8(bytes1));
            v_column_sum_3 = vaddw_u8(v_column_sum_3, vget_low_u8 (bytes2));
            v_column_sum_4 = vaddw_u8(v_column_sum_4, vget_high_u8(bytes2));

            buf += BLOCK_SIZE;
        } while (--n);
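        /*
         * Multiply the column sums by [ 32, 31, 30, ..., 1 ] and fold them
         * into v_s2; the shift by 5 scales the block-start sums by
         * BLOCK_SIZE.
         */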
        v_s2 = vshlq_n_u32(v_s2, 5);

        v_s2 = vmlal_u16(v_s2, vget_low_u16 (v_column_sum_1),
                         (uint16x4_t) { 32, 31, 30, 29 });
        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_1),
                         (uint16x4_t) { 28, 27, 26, 25 });
        v_s2 = vmlal_u16(v_s2, vget_low_u16 (v_column_sum_2),
                         (uint16x4_t) { 24, 23, 22, 21 });
        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_2),
                         (uint16x4_t) { 20, 19, 18, 17 });
        v_s2 = vmlal_u16(v_s2, vget_low_u16 (v_column_sum_3),
                         (uint16x4_t) { 16, 15, 14, 13 });
        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_3),
                         (uint16x4_t) { 12, 11, 10,  9 });
        v_s2 = vmlal_u16(v_s2, vget_low_u16 (v_column_sum_4),
                         (uint16x4_t) {  8,  7,  6,  5 });
        v_s2 = vmlal_u16(v_s2, vget_high_u16(v_column_sum_4),
                         (uint16x4_t) {  4,  3,  2,  1 });
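        /*
         * Reduce the vector accumulators to scalars, fold them into s1 and
         * s2, and reduce both modulo BASE.
         */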
        uint32x2_t sum1 = vpadd_u32(vget_low_u32(v_s1), vget_high_u32(v_s1));
        uint32x2_t sum2 = vpadd_u32(vget_low_u32(v_s2), vget_high_u32(v_s2));
        uint32x2_t s1s2 = vpadd_u32(sum1, sum2);

        s1 += vget_lane_u32(s1s2, 0);
        s2 += vget_lane_u32(s1s2, 1);

        s1 %= BASE;
        s2 %= BASE;
    }
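    /*
     * Handle leftover data.
     */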
    if (len) {
        if (len >= 16) {
            s2 += (s1 += *buf++);
            s2 += (s1 += *buf++);
            s2 += (s1 += *buf++);
            s2 += (s1 += *buf++);

            s2 += (s1 += *buf++);
            s2 += (s1 += *buf++);
            s2 += (s1 += *buf++);
            s2 += (s1 += *buf++);

            s2 += (s1 += *buf++);
            s2 += (s1 += *buf++);
            s2 += (s1 += *buf++);
            s2 += (s1 += *buf++);

            s2 += (s1 += *buf++);
            s2 += (s1 += *buf++);
            s2 += (s1 += *buf++);
            s2 += (s1 += *buf++);

            len -= 16;
        }

        while (len--) {
            s2 += (s1 += *buf++);
        }

        if (s1 >= BASE)
            s1 -= BASE;
        s2 %= BASE;
    }
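    /*
     * Return the recombined sums: s1 in the low 16 bits, s2 in the high bits.
     */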
    return s1 | (s2 << 16);
}
#elif defined(ADLER32_SIMD_RVV)
#include <riscv_vector.h>
uint32_t ZLIB_INTERNAL adler32_simd_(
    uint32_t adler,
    const unsigned char *buf,
    unsigned long len)
{
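    /*
     * a_sum holds per-lane sums of the input bytes; b_sum holds per-lane
     * sums of the running byte sums.
     */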
    size_t vl = __riscv_vsetvlmax_e8m2();
    const vuint16m4_t zero16 = __riscv_vmv_v_x_u16m4(0, vl);
    vuint16m4_t a_sum = zero16;
    vuint32m8_t b_sum = __riscv_vmv_v_x_u32m8(0, vl);
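    /*
     * Deal with the part of the input that does not form a whole group of
     * vl bytes, by sliding it to the top of a zeroed vector.
     */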
    size_t head = len & (vl - 1);
    if (head > 0) {
        vuint8m2_t zero8 = __riscv_vmv_v_x_u8m2(0, vl);
        vuint8m2_t in = __riscv_vle8_v_u8m2(buf, vl);
        in = __riscv_vslideup(zero8, in, vl - head, vl);
        vuint16m4_t in16 = __riscv_vwcvtu_x(in, vl);
        a_sum = in16;
        buf += head;
    }
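    /*
     * b_sum is a 32-bit accumulator that gains up to 22 scaled 16-bit
     * values per outer iteration, so it gets a partial modulo-BASE
     * reduction every BASE / 23 iterations to avoid overflow.
     */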
    const int b_overflow = BASE / 23;
    int fixup = b_overflow;
    ssize_t iters = (len - head) / vl;
    while (iters > 0) {
        /* Per-lane threshold: a batch sum of at least BASE - a_sum means
         * a_sum must wrap modulo BASE after the batch is added.
         */
        const vuint16m4_t a_overflow = __riscv_vrsub(a_sum, BASE, vl);
        int batch = iters < 22 ? iters : 22;
        iters -= batch;
        b_sum = __riscv_vwmaccu(b_sum, batch, a_sum, vl);
        vuint16m4_t a_batch = zero16, b_batch = zero16;

        while (batch-- > 0) {
            vuint8m2_t in8 = __riscv_vle8_v_u8m2(buf, vl);
            buf += vl;
            b_batch = __riscv_vadd(b_batch, a_batch, vl);
            a_batch = __riscv_vwaddu_wv(a_batch, in8, vl);
        }

        /* Fold the batch sums into the running sums; adding 65536 - BASE
         * in lanes that overflowed wraps them modulo BASE.
         */
        vbool4_t ov = __riscv_vmsgeu(a_batch, a_overflow, vl);
        a_sum = __riscv_vadd(a_sum, a_batch, vl);
        a_sum = __riscv_vadd_mu(ov, a_sum, a_sum, 65536 - BASE, vl);
        b_sum = __riscv_vwaddu_wv(b_sum, b_batch, vl);

        /* Periodic partial reduction of b_sum modulo BASE. */
        if (--fixup <= 0) {
            b_sum = __riscv_vnmsac(b_sum, BASE, __riscv_vsrl(b_sum, 16, vl), vl);
            fixup = b_overflow;
        }
    }
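    /*
     * Weight each lane's byte sum by its offset from the end of its group
     * so the horizontal reduction below yields the correct s2 value.
     */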
    const vuint16m4_t off = __riscv_vrsub(__riscv_vid_v_u16m4(vl), vl, vl);
    vuint16m4_t bsum16 = __riscv_vncvt_x(__riscv_vremu(b_sum, BASE, vl), vl);
    b_sum = __riscv_vadd(__riscv_vwmulu(a_sum, off, vl),
                         __riscv_vwmulu(bsum16, vl, vl), vl);
    bsum16 = __riscv_vncvt_x(__riscv_vremu(b_sum, BASE, vl), vl);
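    /*
     * Fold in the starting Adler-32 value, then do the final horizontal
     * reductions across the vector registers and reduce modulo BASE.
     */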
    uint32_t a = adler & 0xffff;
    uint32_t b = ((adler >> 16) + a * (len % BASE)) % BASE;
    vuint32m1_t sca = __riscv_vmv_v_x_u32m1(a, 1);
    vuint32m1_t scb = __riscv_vmv_v_x_u32m1(b, 1);
    sca = __riscv_vwredsumu(a_sum, sca, vl);
    scb = __riscv_vwredsumu(bsum16, scb, vl);
    a = __riscv_vmv_x(sca);
    b = __riscv_vmv_x(scb);
    a %= BASE;
    b %= BASE;

    return (b << 16) | a;
}
#endif