#include "src/dsp/dsp.h"
#if defined(WEBP_USE_NEON)
#include <arm_neon.h>
#include "src/dsp/lossless.h"
#include "src/dsp/neon.h"
#if !defined(WORK_AROUND_GCC)
// Converts 'num_pixels' BGRA pixels to RGBA by swapping the B and R channels,
// 16 pixels per iteration; the <16 leftover pixels go through the C fallback.
static void ConvertBGRAToRGBA_NEON(const uint32_t* src,
                                   int num_pixels, uint8_t* dst) {
  const uint32_t* const end = src + (num_pixels & ~15);
  for (; src < end; src += 16) {
    // De-interleave 16 pixels into four 16-byte channel vectors (B,G,R,A).
    // Note: const-qualified cast; vld4q_u8 only reads from 'src'.
    uint8x16x4_t pixel = vld4q_u8((const uint8_t*)src);
    const uint8x16_t tmp = pixel.val[0];  // swap B <-> R
    pixel.val[0] = pixel.val[2];
    pixel.val[2] = tmp;
    vst4q_u8(dst, pixel);
    dst += 64;  // 16 pixels * 4 bytes
  }
  VP8LConvertBGRAToRGBA_C(src, num_pixels & 15, dst);  // left-overs
}
// Converts 'num_pixels' BGRA pixels to BGR by dropping the alpha channel,
// 16 pixels per iteration; the <16 leftover pixels go through the C fallback.
static void ConvertBGRAToBGR_NEON(const uint32_t* src,
                                  int num_pixels, uint8_t* dst) {
  const uint32_t* const end = src + (num_pixels & ~15);
  for (; src < end; src += 16) {
    // De-interleave into four channel vectors, then store only B, G, R.
    // Note: const-qualified cast; vld4q_u8 only reads from 'src'.
    const uint8x16x4_t pixel = vld4q_u8((const uint8_t*)src);
    const uint8x16x3_t tmp = { { pixel.val[0], pixel.val[1], pixel.val[2] } };
    vst3q_u8(dst, tmp);
    dst += 48;  // 16 pixels * 3 bytes
  }
  VP8LConvertBGRAToBGR_C(src, num_pixels & 15, dst);  // left-overs
}
// Converts 'num_pixels' BGRA pixels to RGB (drops alpha, reverses B/R order),
// 16 pixels per iteration; the <16 leftover pixels go through the C fallback.
static void ConvertBGRAToRGB_NEON(const uint32_t* src,
                                  int num_pixels, uint8_t* dst) {
  const uint32_t* const end = src + (num_pixels & ~15);
  for (; src < end; src += 16) {
    // De-interleave into four channel vectors, then store R, G, B.
    // Note: const-qualified cast; vld4q_u8 only reads from 'src'.
    const uint8x16x4_t pixel = vld4q_u8((const uint8_t*)src);
    const uint8x16x3_t tmp = { { pixel.val[2], pixel.val[1], pixel.val[0] } };
    vst3q_u8(dst, tmp);
    dst += 48;  // 16 pixels * 3 bytes
  }
  VP8LConvertBGRAToRGB_C(src, num_pixels & 15, dst);  // left-overs
}
#else
// Per-pixel byte permutation {2,1,0,3}: reorders BGRA -> RGBA for each of the
// two 4-byte pixels held in one 8-byte vector.
static const uint8_t kRGBAShuffle[8] = { 2, 1, 0, 3, 6, 5, 4, 7 };

// Converts BGRA pixels to RGBA with a single table lookup, two pixels per
// iteration; the odd leftover pixel (if any) goes through the C fallback.
static void ConvertBGRAToRGBA_NEON(const uint32_t* src,
                                   int num_pixels, uint8_t* dst) {
  const uint32_t* const end = src + (num_pixels & ~1);
  const uint8x8_t shuffle = vld1_u8(kRGBAShuffle);
  for (; src < end; src += 2) {
    // Note: const-qualified cast; vld1_u8 only reads from 'src'.
    const uint8x8_t pixels = vld1_u8((const uint8_t*)src);
    vst1_u8(dst, vtbl1_u8(pixels, shuffle));
    dst += 8;  // 2 pixels * 4 bytes
  }
  VP8LConvertBGRAToRGBA_C(src, num_pixels & 1, dst);  // left-overs
}
// Byte-gather tables over 32 input bytes (8 BGRA pixels): every 4th byte
// (3, 7, 11, ... = alpha) is skipped, producing 24 BGR bytes in three
// 8-byte lookups.
static const uint8_t kBGRShuffle[3][8] = {
{ 0, 1, 2, 4, 5, 6, 8, 9 },
{ 10, 12, 13, 14, 16, 17, 18, 20 },
{ 21, 22, 24, 25, 26, 28, 29, 30 }
};
// Converts BGRA pixels to BGR (drops alpha), 8 pixels per iteration via
// vtbl4 lookups; the <8 leftover pixels go through the C fallback.
static void ConvertBGRAToBGR_NEON(const uint32_t* src,
int num_pixels, uint8_t* dst) {
const uint32_t* const end = src + (num_pixels & ~7);
const uint8x8_t shuffle0 = vld1_u8(kBGRShuffle[0]);
const uint8x8_t shuffle1 = vld1_u8(kBGRShuffle[1]);
const uint8x8_t shuffle2 = vld1_u8(kBGRShuffle[2]);
for (; src < end; src += 8) {
// Load 8 pixels (32 bytes) as one 4-way table for vtbl4_u8.
uint8x8x4_t pixels;
INIT_VECTOR4(pixels,
vld1_u8((const uint8_t*)(src + 0)),
vld1_u8((const uint8_t*)(src + 2)),
vld1_u8((const uint8_t*)(src + 4)),
vld1_u8((const uint8_t*)(src + 6)));
vst1_u8(dst + 0, vtbl4_u8(pixels, shuffle0));
vst1_u8(dst + 8, vtbl4_u8(pixels, shuffle1));
vst1_u8(dst + 16, vtbl4_u8(pixels, shuffle2));
dst += 8 * 3;  // 8 pixels * 3 bytes
}
VP8LConvertBGRAToBGR_C(src, num_pixels & 7, dst);  // left-overs
}
// Byte-gather tables over 32 input bytes (8 BGRA pixels): alpha bytes are
// skipped and each remaining B,G,R triple is reversed to R,G,B (e.g.
// 2,1,0 then 6,5,4, ...), producing 24 RGB bytes in three 8-byte lookups.
static const uint8_t kRGBShuffle[3][8] = {
{ 2, 1, 0, 6, 5, 4, 10, 9 },
{ 8, 14, 13, 12, 18, 17, 16, 22 },
{ 21, 20, 26, 25, 24, 30, 29, 28 }
};
// Converts BGRA pixels to RGB (drops alpha, swaps B/R), 8 pixels per
// iteration via vtbl4 lookups; the <8 leftovers go through the C fallback.
static void ConvertBGRAToRGB_NEON(const uint32_t* src,
int num_pixels, uint8_t* dst) {
const uint32_t* const end = src + (num_pixels & ~7);
const uint8x8_t shuffle0 = vld1_u8(kRGBShuffle[0]);
const uint8x8_t shuffle1 = vld1_u8(kRGBShuffle[1]);
const uint8x8_t shuffle2 = vld1_u8(kRGBShuffle[2]);
for (; src < end; src += 8) {
// Load 8 pixels (32 bytes) as one 4-way table for vtbl4_u8.
uint8x8x4_t pixels;
INIT_VECTOR4(pixels,
vld1_u8((const uint8_t*)(src + 0)),
vld1_u8((const uint8_t*)(src + 2)),
vld1_u8((const uint8_t*)(src + 4)),
vld1_u8((const uint8_t*)(src + 6)));
vst1_u8(dst + 0, vtbl4_u8(pixels, shuffle0));
vst1_u8(dst + 8, vtbl4_u8(pixels, shuffle1));
vst1_u8(dst + 16, vtbl4_u8(pixels, shuffle2));
dst += 8 * 3;  // 8 pixels * 3 bytes
}
VP8LConvertBGRAToRGB_C(src, num_pixels & 7, dst);  // left-overs
}
#endif
// Helpers converting between uint32_t pixel values and NEON byte vectors,
// used throughout the predictor code below. Based on the call sites: the
// 'P' variants take a pointer (e.g. LOADQ_U32P_AS_U8(&in[i])), the 'Q'
// variants operate on 128-bit (uint8x16_t) vectors. They are #undef'd once
// the predictors are defined.
#define LOAD_U32_AS_U8 …
#define LOAD_U32P_AS_U8 …
#define LOADQ_U32_AS_U8 …
#define LOADQ_U32P_AS_U8 …
#define GET_U8_AS_U32 …
#define GETQ_U8_AS_U32 …
#define STOREQ_U8_AS_U32P …
#define ROTATE32_LEFT …
// Per-channel truncating average of two packed pixels:
// each byte lane is (a0 + a1) >> 1 (vhadd_u8 rounds toward zero).
static WEBP_INLINE uint8x8_t Average2_u8_NEON(uint32_t a0, uint32_t a1) {
const uint8x8_t A0 = LOAD_U32_AS_U8(a0);
const uint8x8_t A1 = LOAD_U32_AS_U8(a1);
return vhadd_u8(A0, A1);
}
// Per-channel clamped 'avg + (avg - c2) / 2' with avg = Average2(c0, c1)
// (the VP8L ClampedAddSubtractHalf predictor, used by predictor #13).
static WEBP_INLINE uint32_t ClampedAddSubtractHalf_NEON(uint32_t c0,
uint32_t c1,
uint32_t c2) {
const uint8x8_t avg = Average2_u8_NEON(c0, c1);
const uint8x8_t C2 = LOAD_U32_AS_U8(c2);
// cmp is 0xff (i.e. -1) in lanes where c2 > avg; adding it to C2 nudges the
// truncating halving-subtract below so its rounding matches the scalar code.
const uint8x8_t cmp = vcgt_u8(C2, avg);
const uint8x8_t C2_1 = vadd_u8(C2, cmp);
const int8x8_t diff_avg = vreinterpret_s8_u8(vhsub_u8(avg, C2_1));
// Widen avg to 16 bits, add the signed half-difference, and saturate the
// result back into [0, 255] per lane.
const int16x8_t avg_16 = vreinterpretq_s16_u16(vmovl_u8(avg));
const uint8x8_t res = vqmovun_s16(vaddw_s8(avg_16, diff_avg));
const uint32_t output = GET_U8_AS_U32(res);
return output;
}
// Scalar wrapper: per-channel truncating average of two packed pixels.
static WEBP_INLINE uint32_t Average2_NEON(uint32_t a0, uint32_t a1) {
const uint8x8_t avg_u8x8 = Average2_u8_NEON(a0, a1);
const uint32_t avg = GET_U8_AS_U32(avg_u8x8);
return avg;
}
// Per-channel Average2(Average2(a0, a2), a1) of three packed pixels.
static WEBP_INLINE uint32_t Average3_NEON(uint32_t a0, uint32_t a1,
uint32_t a2) {
const uint8x8_t avg0 = Average2_u8_NEON(a0, a2);
const uint8x8_t A1 = LOAD_U32_AS_U8(a1);
const uint32_t avg = GET_U8_AS_U32(vhadd_u8(avg0, A1));
return avg;
}
// Predictor #5: Average3(left, top, top-right).
static uint32_t Predictor5_NEON(const uint32_t* const left,
const uint32_t* const top) {
return Average3_NEON(*left, top[0], top[1]);
}
// Predictor #6: Average2(left, top-left).
static uint32_t Predictor6_NEON(const uint32_t* const left,
const uint32_t* const top) {
return Average2_NEON(*left, top[-1]);
}
// Predictor #7: Average2(left, top).
static uint32_t Predictor7_NEON(const uint32_t* const left,
const uint32_t* const top) {
return Average2_NEON(*left, top[0]);
}
// Predictor #13: ClampedAddSubtractHalf(left, top, top-left).
static uint32_t Predictor13_NEON(const uint32_t* const left,
const uint32_t* const top) {
return ClampedAddSubtractHalf_NEON(*left, top[0], top[-1]);
}
// Predictor #0: out[i] = in[i] + ARGB_BLACK, 4 pixels per iteration.
// 'upper' is unused by this predictor (kept for the common signature).
static void PredictorAdd0_NEON(const uint32_t* in, const uint32_t* upper,
int num_pixels, uint32_t* out) {
int i;
const uint8x16_t black = vreinterpretq_u8_u32(vdupq_n_u32(ARGB_BLACK));
for (i = 0; i + 4 <= num_pixels; i += 4) {
const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]);
const uint8x16_t res = vaddq_u8(src, black);
STOREQ_U8_AS_U32P(&out[i], res);
}
// Handle the <4 remaining pixels with the C implementation.
VP8LPredictorsAdd_C[0](in + i, upper + i, num_pixels - i, out + i);
}
// Predictor #1 (left): out[i] = in[i] + out[i - 1]. The two vextq/vaddq
// pairs build an in-register prefix sum of the 4 residuals (per 8-bit lane,
// wrapping), which is then offset by the previous output pixel.
// Reads out[-1] on the first iteration.
// NOTE(review): assumes out[-1] is valid on entry - confirm at call sites.
static void PredictorAdd1_NEON(const uint32_t* in, const uint32_t* upper,
int num_pixels, uint32_t* out) {
int i;
const uint8x16_t zero = LOADQ_U32_AS_U8(0);
for (i = 0; i + 4 <= num_pixels; i += 4) {
const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]);
// sum0 = {in0, in0+in1, in1+in2, in2+in3}; sum1 completes the prefix sum.
const uint8x16_t shift0 = vextq_u8(zero, src, 12);
const uint8x16_t sum0 = vaddq_u8(src, shift0);
const uint8x16_t shift1 = vextq_u8(zero, sum0, 8);
const uint8x16_t sum1 = vaddq_u8(sum0, shift1);
const uint8x16_t prev = LOADQ_U32_AS_U8(out[i - 1]);
const uint8x16_t res = vaddq_u8(sum1, prev);
STOREQ_U8_AS_U32P(&out[i], res);
}
// Handle the <4 remaining pixels with the C implementation.
VP8LPredictorsAdd_C[1](in + i, upper + i, num_pixels - i, out + i);
}
// Predictors #2..#4 add a single neighboring pixel from the row above (top,
// top-right, top-left respectively) and are stamped out from one template.
#define GENERATE_PREDICTOR_1 …
GENERATE_PREDICTOR_1(2, upper[i])  // top
GENERATE_PREDICTOR_1(3, upper[i + 1])  // top-right
GENERATE_PREDICTOR_1(4, upper[i - 1])  // top-left
#undef GENERATE_PREDICTOR_1
// Predictor #5: out[i] = in[i] + Average3(left, top, top-right),
// 4 pixels per iteration. DO_PRED5(k) processes pixel k of the group.
#define DO_PRED5 …
static void PredictorAdd5_NEON(const uint32_t* in, const uint32_t* upper,
int num_pixels, uint32_t* out) {
int i;
// L carries the running 'left' pixel across the loop; starts at out[-1].
// NOTE(review): assumes out[-1] is valid on entry - confirm at call sites.
uint8x16_t L = LOADQ_U32_AS_U8(out[-1]);
for (i = 0; i + 4 <= num_pixels; i += 4) {
const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]);
const uint8x16_t T = LOADQ_U32P_AS_U8(&upper[i + 0]);
const uint8x16_t TR = LOADQ_U32P_AS_U8(&upper[i + 1]);
DO_PRED5(0);
DO_PRED5(1);
DO_PRED5(2);
DO_PRED5(3);
}
// Handle the <4 remaining pixels with the C implementation.
VP8LPredictorsAdd_C[5](in + i, upper + i, num_pixels - i, out + i);
}
#undef DO_PRED5
// Shared per-pixel step for predictors #6 and #7 (Average2 of 'left' and a
// top-row neighbor); the two functions differ only in which 'top' they load.
#define DO_PRED67 …
// Predictor #6: out[i] = in[i] + Average2(left, top-left).
static void PredictorAdd6_NEON(const uint32_t* in, const uint32_t* upper,
int num_pixels, uint32_t* out) {
int i;
// L carries the running 'left' pixel across the loop; starts at out[-1].
uint8x16_t L = LOADQ_U32_AS_U8(out[-1]);
for (i = 0; i + 4 <= num_pixels; i += 4) {
const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]);
const uint8x16_t top = LOADQ_U32P_AS_U8(&upper[i - 1]);  // top-left
DO_PRED67(0);
DO_PRED67(1);
DO_PRED67(2);
DO_PRED67(3);
}
// Handle the <4 remaining pixels with the C implementation.
VP8LPredictorsAdd_C[6](in + i, upper + i, num_pixels - i, out + i);
}
// Predictor #7: out[i] = in[i] + Average2(left, top).
static void PredictorAdd7_NEON(const uint32_t* in, const uint32_t* upper,
int num_pixels, uint32_t* out) {
int i;
// L carries the running 'left' pixel across the loop; starts at out[-1].
uint8x16_t L = LOADQ_U32_AS_U8(out[-1]);
for (i = 0; i + 4 <= num_pixels; i += 4) {
const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]);
const uint8x16_t top = LOADQ_U32P_AS_U8(&upper[i]);  // directly above
DO_PRED67(0);
DO_PRED67(1);
DO_PRED67(2);
DO_PRED67(3);
}
// Handle the <4 remaining pixels with the C implementation.
VP8LPredictorsAdd_C[7](in + i, upper + i, num_pixels - i, out + i);
}
#undef DO_PRED67
// Predictors #8 and #9 average a top-row pixel with the given neighbor
// (top-left and top-right respectively); stamped out from one template.
#define GENERATE_PREDICTOR_2 …
GENERATE_PREDICTOR_2(8, upper[i - 1])  // top-left
GENERATE_PREDICTOR_2(9, upper[i + 1])  // top-right
#undef GENERATE_PREDICTOR_2
// Predictor #10: Average4-style prediction using left, top-left, top and
// top-right; 4 pixels per iteration. DO_PRED10(k) processes pixel k.
#define DO_PRED10 …
static void PredictorAdd10_NEON(const uint32_t* in, const uint32_t* upper,
int num_pixels, uint32_t* out) {
int i;
// L carries the running 'left' pixel across the loop; starts at out[-1].
uint8x16_t L = LOADQ_U32_AS_U8(out[-1]);
for (i = 0; i + 4 <= num_pixels; i += 4) {
const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]);
const uint8x16_t TL = LOADQ_U32P_AS_U8(&upper[i - 1]);
const uint8x16_t T = LOADQ_U32P_AS_U8(&upper[i]);
const uint8x16_t TR = LOADQ_U32P_AS_U8(&upper[i + 1]);
// Per-channel truncating average of top and top-right, precomputed once
// for all 4 pixels of the group.
const uint8x16_t avgTTR = vhaddq_u8(T, TR);
DO_PRED10(0);
DO_PRED10(1);
DO_PRED10(2);
DO_PRED10(3);
}
// Handle the <4 remaining pixels with the C implementation.
VP8LPredictorsAdd_C[10](in + i, upper + i, num_pixels - i, out + i);
}
#undef DO_PRED10
// Predictor #11 (Select): picks either 'left' or 'top' per pixel based on
// absolute-difference distances; 4 pixels per iteration.
#define DO_PRED11 …
static void PredictorAdd11_NEON(const uint32_t* in, const uint32_t* upper,
int num_pixels, uint32_t* out) {
int i;
// L carries the running 'left' pixel across the loop; starts at out[-1].
uint8x16_t L = LOADQ_U32_AS_U8(out[-1]);
for (i = 0; i + 4 <= num_pixels; i += 4) {
const uint8x16_t T = LOADQ_U32P_AS_U8(&upper[i]);
const uint8x16_t TL = LOADQ_U32P_AS_U8(&upper[i - 1]);
// pb: per-pixel sum of |T - TL| over the 4 channels (the two widening
// pairwise adds reduce each pixel's 4 byte-differences to one 32-bit sum).
const uint8x16_t pTTL = vabdq_u8(T, TL);
const uint16x8_t sum_TTL = vpaddlq_u8(pTTL);
const uint32x4_t pb = vpaddlq_u16(sum_TTL);
const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]);
// Precomputed 'top' branch of the selection: T + in[i].
const uint8x16_t sumTin = vaddq_u8(T, src);
DO_PRED11(0);
DO_PRED11(1);
DO_PRED11(2);
DO_PRED11(3);
}
// Handle the <4 remaining pixels with the C implementation.
VP8LPredictorsAdd_C[11](in + i, upper + i, num_pixels - i, out + i);
}
#undef DO_PRED11
// Predictor #12 (ClampedAddSubtractFull): per-channel clip of
// left + (top - top-left); 4 pixels per iteration, arithmetic carried out in
// widened 16-bit lanes. DO_PRED12(diff, k) processes pixel k.
#define DO_PRED12 …
static void PredictorAdd12_NEON(const uint32_t* in, const uint32_t* upper,
int num_pixels, uint32_t* out) {
int i;
// L holds the running 'left' pixel widened to 16 bits; starts at out[-1].
uint16x8_t L = vmovl_u8(LOAD_U32_AS_U8(out[-1]));
for (i = 0; i + 4 <= num_pixels; i += 4) {
const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]);
const uint8x16_t TL = LOADQ_U32P_AS_U8(&upper[i - 1]);
const uint8x16_t T = LOADQ_U32P_AS_U8(&upper[i]);
// Signed 16-bit per-channel differences (T - TL) for pixels 0/1 (lo) and
// 2/3 (hi).
const int16x8_t diff_lo =
vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(T), vget_low_u8(TL)));
const int16x8_t diff_hi =
vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(T), vget_high_u8(TL)));
DO_PRED12(diff_lo, 0);
DO_PRED12(diff_lo, 1);
DO_PRED12(diff_hi, 2);
DO_PRED12(diff_hi, 3);
}
// Handle the <4 remaining pixels with the C implementation.
VP8LPredictorsAdd_C[12](in + i, upper + i, num_pixels - i, out + i);
}
#undef DO_PRED12
// Predictor #13 (ClampedAddSubtractHalf): vector counterpart of
// ClampedAddSubtractHalf_NEON above, 4 pixels per iteration. DO_PRED13's
// second argument selects the half (low = pixels 0/1, high = pixels 2/3).
#define DO_PRED13 …
static void PredictorAdd13_NEON(const uint32_t* in, const uint32_t* upper,
int num_pixels, uint32_t* out) {
int i;
// L carries the running 'left' pixel across the loop; starts at out[-1].
uint8x16_t L = LOADQ_U32_AS_U8(out[-1]);
for (i = 0; i + 4 <= num_pixels; i += 4) {
const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]);
const uint8x16_t T = LOADQ_U32P_AS_U8(&upper[i]);
const uint8x16_t TL = LOADQ_U32P_AS_U8(&upper[i - 1]);
DO_PRED13(0, vget_low_u8);
DO_PRED13(1, vget_low_u8);
DO_PRED13(2, vget_high_u8);
DO_PRED13(3, vget_high_u8);
}
// Handle the <4 remaining pixels with the C implementation.
VP8LPredictorsAdd_C[13](in + i, upper + i, num_pixels - i, out + i);
}
#undef DO_PRED13
#undef LOAD_U32_AS_U8
#undef LOAD_U32P_AS_U8
#undef LOADQ_U32_AS_U8
#undef LOADQ_U32P_AS_U8
#undef GET_U8_AS_U32
#undef GETQ_U8_AS_U32
#undef STOREQ_U8_AS_U32P
#undef ROTATE32_LEFT
// On aarch64 with old Apple toolchains, take the q-register table-lookup
// path below. NOTE(review): the exact motivation is not visible here --
// presumably a codegen issue in those compiler versions; confirm against
// upstream history before changing the version threshold.
#if defined(__APPLE__) && WEBP_AARCH64 && \
defined(__apple_build_version__) && (__apple_build_version__< 6020037)
#define USE_VTBLQ
#endif
#ifdef USE_VTBLQ
// Indices 1/5/9/13 copy byte 1 (green) of each 4-byte pixel into the byte-0
// and byte-2 (blue/red) lanes; 255 is out of table range, so vtbl zeroes
// those lanes.
static const uint8_t kGreenShuffle[16] = {
1, 255, 1, 255, 5, 255, 5, 255, 9, 255, 9, 255, 13, 255, 13, 255
};
// Returns a vector with each pixel's green byte placed in its blue and red
// lanes (other lanes zero).
static WEBP_INLINE uint8x16_t DoGreenShuffle_NEON(const uint8x16_t argb,
const uint8x16_t shuffle) {
return vcombine_u8(vtbl1q_u8(argb, vget_low_u8(shuffle)),
vtbl1q_u8(argb, vget_high_u8(shuffle)));
}
#else  // !USE_VTBLQ
// 8-byte variant of the same shuffle, applied to each half of the argument.
static const uint8_t kGreenShuffle[8] = { 1, 255, 1, 255, 5, 255, 5, 255 };
static WEBP_INLINE uint8x16_t DoGreenShuffle_NEON(const uint8x16_t argb,
const uint8x8_t shuffle) {
return vcombine_u8(vtbl1_u8(vget_low_u8(argb), shuffle),
vtbl1_u8(vget_high_u8(argb), shuffle));
}
#endif  // USE_VTBLQ
// Inverse of the VP8L subtract-green transform: adds each pixel's green
// channel to its blue and red channels. 4 pixels per iteration; the <4
// leftover pixels go through the C fallback.
static void AddGreenToBlueAndRed_NEON(const uint32_t* src, int num_pixels,
uint32_t* dst) {
const uint32_t* const end = src + (num_pixels & ~3);
#ifdef USE_VTBLQ
const uint8x16_t shuffle = vld1q_u8(kGreenShuffle);
#else
const uint8x8_t shuffle = vld1_u8(kGreenShuffle);
#endif
for (; src < end; src += 4, dst += 4) {
const uint8x16_t argb = vld1q_u8((const uint8_t*)src);
// 'greens' has green in the blue/red lanes and zero elsewhere, so the add
// leaves alpha and green untouched.
const uint8x16_t greens = DoGreenShuffle_NEON(argb, shuffle);
vst1q_u8((uint8_t*)dst, vaddq_u8(argb, greens));
}
VP8LAddGreenToBlueAndRed_C(src, num_pixels & 3, dst);  // left-overs
}
// Inverse color transform: undoes the cross-color transform by adding the
// green-scaled terms back to red/blue (and the red-scaled term back to blue)
// using the multipliers in 'm'. 4 pixels per iteration; the <4 leftover
// pixels go through the C fallback.
static void TransformColorInverse_NEON(const VP8LMultipliers* const m,
const uint32_t* const src,
int num_pixels, uint32_t* dst) {
// CST converts an 8-bit multiplier from 'm' into the signed fixed-point
// form consumed by vqdmulhq_s16 below.
#define CST …
// Interleaved green_to_blue/green_to_red multipliers, one pair per pixel.
const int16_t rb[8] = {
CST(green_to_blue_), CST(green_to_red_),
CST(green_to_blue_), CST(green_to_red_),
CST(green_to_blue_), CST(green_to_red_),
CST(green_to_blue_), CST(green_to_red_)
};
const int16x8_t mults_rb = vld1q_s16(rb);
// red_to_blue multipliers, applied only to the blue half of each pixel.
const int16_t b2[8] = {
0, CST(red_to_blue_), 0, CST(red_to_blue_),
0, CST(red_to_blue_), 0, CST(red_to_blue_),
};
const int16x8_t mults_b2 = vld1q_s16(b2);
#undef CST
#ifdef USE_VTBLQ
static const uint8_t kg0g0[16] = {
255, 1, 255, 1, 255, 5, 255, 5, 255, 9, 255, 9, 255, 13, 255, 13
};
const uint8x16_t shuffle = vld1q_u8(kg0g0);
#else
static const uint8_t k0g0g[8] = { 255, 1, 255, 1, 255, 5, 255, 5 };
const uint8x8_t shuffle = vld1_u8(k0g0g);
#endif
// Mask selecting bytes 1 and 3 of each pixel (they pass through unchanged).
const uint32x4_t mask_ag = vdupq_n_u32(0xff00ff00u);
int i;
for (i = 0; i + 4 <= num_pixels; i += 4) {
const uint8x16_t in = vld1q_u8((const uint8_t*)(src + i));
// a0g0: the untouched alpha/green bytes, OR'd back in at the end.
const uint32x4_t a0g0 = vandq_u32(vreinterpretq_u32_u8(in), mask_ag);
const uint8x16_t greens = DoGreenShuffle_NEON(in, shuffle);
// A: fixed-point products of green with the rb multipliers; B adds them
// to the original red/blue bytes (8-bit wrapping add).
const int16x8_t A = vqdmulhq_s16(vreinterpretq_s16_u8(greens), mults_rb);
const int8x16_t B = vaddq_s8(vreinterpretq_s8_u8(in),
vreinterpretq_s8_s16(A));
// C..F: compute and add the red_to_blue contribution to the blue bytes.
const int16x8_t C = vshlq_n_s16(vreinterpretq_s16_s8(B), 8);
const int16x8_t D = vqdmulhq_s16(C, mults_b2);
const uint32x4_t E = vshrq_n_u32(vreinterpretq_u32_s16(D), 8);
const int8x16_t F = vaddq_s8(vreinterpretq_s8_u32(E),
vreinterpretq_s8_s16(C));
// G: final red/blue bytes, recombined with the preserved alpha/green.
const uint16x8_t G = vshrq_n_u16(vreinterpretq_u16_s8(F), 8);
const uint32x4_t out = vorrq_u32(vreinterpretq_u32_u16(G), a0g0);
vst1q_u32(dst + i, out);
}
VP8LTransformColorInverse_C(m, src + i, num_pixels - i, dst + i);  // tail
}
#undef USE_VTBLQ
// Entry point: installs the NEON implementations into the VP8L function
// pointer tables (overriding the C defaults set up elsewhere).
extern void VP8LDspInitNEON(void);
WEBP_TSAN_IGNORE_FUNCTION void VP8LDspInitNEON(void) {
VP8LPredictors[5] = Predictor5_NEON;
VP8LPredictors[6] = Predictor6_NEON;
VP8LPredictors[7] = Predictor7_NEON;
VP8LPredictors[13] = Predictor13_NEON;
VP8LPredictorsAdd[0] = PredictorAdd0_NEON;
VP8LPredictorsAdd[1] = PredictorAdd1_NEON;
// PredictorAdd2..4 and 8..9 are generated by the GENERATE_PREDICTOR_*
// macro templates above.
VP8LPredictorsAdd[2] = PredictorAdd2_NEON;
VP8LPredictorsAdd[3] = PredictorAdd3_NEON;
VP8LPredictorsAdd[4] = PredictorAdd4_NEON;
VP8LPredictorsAdd[5] = PredictorAdd5_NEON;
VP8LPredictorsAdd[6] = PredictorAdd6_NEON;
VP8LPredictorsAdd[7] = PredictorAdd7_NEON;
VP8LPredictorsAdd[8] = PredictorAdd8_NEON;
VP8LPredictorsAdd[9] = PredictorAdd9_NEON;
VP8LPredictorsAdd[10] = PredictorAdd10_NEON;
VP8LPredictorsAdd[11] = PredictorAdd11_NEON;
VP8LPredictorsAdd[12] = PredictorAdd12_NEON;
VP8LPredictorsAdd[13] = PredictorAdd13_NEON;
VP8LConvertBGRAToRGBA = ConvertBGRAToRGBA_NEON;
VP8LConvertBGRAToBGR = ConvertBGRAToBGR_NEON;
VP8LConvertBGRAToRGB = ConvertBGRAToRGB_NEON;
VP8LAddGreenToBlueAndRed = AddGreenToBlueAndRed_NEON;
VP8LTransformColorInverse = TransformColorInverse_NEON;
}
#else
WEBP_DSP_INIT_STUB(VP8LDspInitNEON)
#endif