#include "private/cpu.h"
#ifndef FLAC__INTEGER_ONLY_LIBRARY
#ifndef FLAC__NO_ASM
#if defined FLAC__CPU_ARM64 && FLAC__HAS_NEONINTRIN
#include "private/lpc.h"
#include "FLAC/assert.h"
#include "FLAC/format.h"
#include "private/macros.h"
#include <arm_neon.h>
#if FLAC__HAS_A64NEONINTRIN
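/*
 * Autocorrelation kernels for lags 14, 10 and 8. Each function sets MAX_LAG
 * and then includes the shared, lag-generic loop body from
 * deduplication/lpc_compute_autocorrelation_intrin_neon.c, so the three
 * variants stay in sync.
 */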
void FLAC__lpc_compute_autocorrelation_intrin_neon_lag_14(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[])
{
#undef MAX_LAG
#define MAX_LAG …
#include "deduplication/lpc_compute_autocorrelation_intrin_neon.c"
}
void FLAC__lpc_compute_autocorrelation_intrin_neon_lag_10(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[])
{
#undef MAX_LAG
#define MAX_LAG …
#include "deduplication/lpc_compute_autocorrelation_intrin_neon.c"
}
void FLAC__lpc_compute_autocorrelation_intrin_neon_lag_8(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[])
{
#undef MAX_LAG
#define MAX_LAG …
#include "deduplication/lpc_compute_autocorrelation_intrin_neon.c"
}
#endif
#define MUL_32_BIT_LOOP_UNROOL_3 …
#define MACC_32BIT_LOOP_UNROOL_3 …
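/*
 * Residual computation for orders <= 12 using 32-bit accumulators.
 * Each vectorized iteration produces 12 residuals in three int32x4_t
 * accumulators (summ_0/1/2). The MUL/MACC macros above (bodies elided here)
 * appear to multiply, respectively multiply-accumulate, one qlp_coeff lane
 * against three of the preloaded tmp_vec history vectors.
 */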
void FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_neon(const FLAC__int32 *data, uint32_t data_len, const FLAC__int32 qlp_coeff[], uint32_t order, int lp_quantization, FLAC__int32 residual[])
{
int i;
FLAC__int32 sum;
int32x4_t tmp_vec[20];
FLAC__ASSERT(order > 0);
FLAC__ASSERT(order <= 32);
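/* Dispatch on the predictor order: orders 1..12 get unrolled NEON paths,
   larger orders fall back to the scalar loop at the end of the function. */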
if(order <= 12) {
if(order > 8) {
if(order > 10) {
if (order == 12) {
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], qlp_coeff[7]};
int32x4_t qlp_coeff_2 = {qlp_coeff[8], qlp_coeff[9], qlp_coeff[10], qlp_coeff[11]};
tmp_vec[0] = vld1q_s32(data - 12);
tmp_vec[1] = vld1q_s32(data - 11);
tmp_vec[2] = vld1q_s32(data - 10);
tmp_vec[3] = vld1q_s32(data - 9);
tmp_vec[4] = vld1q_s32(data - 8);
tmp_vec[5] = vld1q_s32(data - 7);
tmp_vec[6] = vld1q_s32(data - 6);
tmp_vec[7] = vld1q_s32(data - 5);
for (i = 0; i < (int)data_len - 11; i += 12)
{
int32x4_t summ_0, summ_1, summ_2;
tmp_vec[8] = vld1q_s32(data + i - 4);
tmp_vec[9] = vld1q_s32(data + i - 3);
tmp_vec[10] = vld1q_s32(data + i - 2);
tmp_vec[11] = vld1q_s32(data + i - 1);
tmp_vec[12] = vld1q_s32(data + i);
tmp_vec[13] = vld1q_s32(data + i + 1);
tmp_vec[14] = vld1q_s32(data + i + 2);
tmp_vec[15] = vld1q_s32(data + i + 3);
tmp_vec[16] = vld1q_s32(data + i + 4);
tmp_vec[17] = vld1q_s32(data + i + 5);
tmp_vec[18] = vld1q_s32(data + i + 6);
tmp_vec[19] = vld1q_s32(data + i + 7);
MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_2, 3)
MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_2, 2)
MACC_32BIT_LOOP_UNROOL_3(2, qlp_coeff_2, 1)
MACC_32BIT_LOOP_UNROOL_3(3, qlp_coeff_2, 0)
MACC_32BIT_LOOP_UNROOL_3(4, qlp_coeff_1, 3)
MACC_32BIT_LOOP_UNROOL_3(5, qlp_coeff_1, 2)
MACC_32BIT_LOOP_UNROOL_3(6, qlp_coeff_1, 1)
MACC_32BIT_LOOP_UNROOL_3(7, qlp_coeff_1, 0)
MACC_32BIT_LOOP_UNROOL_3(8, qlp_coeff_0, 3)
MACC_32BIT_LOOP_UNROOL_3(9, qlp_coeff_0, 2)
MACC_32BIT_LOOP_UNROOL_3(10, qlp_coeff_0, 1)
MACC_32BIT_LOOP_UNROOL_3(11, qlp_coeff_0, 0)
vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
tmp_vec[0] = tmp_vec[12];
tmp_vec[1] = tmp_vec[13];
tmp_vec[2] = tmp_vec[14];
tmp_vec[3] = tmp_vec[15];
tmp_vec[4] = tmp_vec[16];
tmp_vec[5] = tmp_vec[17];
tmp_vec[6] = tmp_vec[18];
tmp_vec[7] = tmp_vec[19];
}
}
else {
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], qlp_coeff[7]};
int32x4_t qlp_coeff_2 = {qlp_coeff[8], qlp_coeff[9], qlp_coeff[10], 0};
tmp_vec[0] = vld1q_s32(data - 11);
tmp_vec[1] = vld1q_s32(data - 10);
tmp_vec[2] = vld1q_s32(data - 9);
tmp_vec[3] = vld1q_s32(data - 8);
tmp_vec[4] = vld1q_s32(data - 7);
tmp_vec[5] = vld1q_s32(data - 6);
tmp_vec[6] = vld1q_s32(data - 5);
for (i = 0; i < (int)data_len - 11; i += 12)
{
int32x4_t summ_0, summ_1, summ_2;
tmp_vec[7] = vld1q_s32(data + i - 4);
tmp_vec[8] = vld1q_s32(data + i - 3);
tmp_vec[9] = vld1q_s32(data + i - 2);
tmp_vec[10] = vld1q_s32(data + i - 1);
tmp_vec[11] = vld1q_s32(data + i - 0);
tmp_vec[12] = vld1q_s32(data + i + 1);
tmp_vec[13] = vld1q_s32(data + i + 2);
tmp_vec[14] = vld1q_s32(data + i + 3);
tmp_vec[15] = vld1q_s32(data + i + 4);
tmp_vec[16] = vld1q_s32(data + i + 5);
tmp_vec[17] = vld1q_s32(data + i + 6);
tmp_vec[18] = vld1q_s32(data + i + 7);
MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_2, 2)
MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_2, 1)
MACC_32BIT_LOOP_UNROOL_3(2, qlp_coeff_2, 0)
MACC_32BIT_LOOP_UNROOL_3(3, qlp_coeff_1, 3)
MACC_32BIT_LOOP_UNROOL_3(4, qlp_coeff_1, 2)
MACC_32BIT_LOOP_UNROOL_3(5, qlp_coeff_1, 1)
MACC_32BIT_LOOP_UNROOL_3(6, qlp_coeff_1, 0)
MACC_32BIT_LOOP_UNROOL_3(7, qlp_coeff_0, 3)
MACC_32BIT_LOOP_UNROOL_3(8, qlp_coeff_0, 2)
MACC_32BIT_LOOP_UNROOL_3(9, qlp_coeff_0, 1)
MACC_32BIT_LOOP_UNROOL_3(10, qlp_coeff_0, 0)
vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
tmp_vec[0] = tmp_vec[12];
tmp_vec[1] = tmp_vec[13];
tmp_vec[2] = tmp_vec[14];
tmp_vec[3] = tmp_vec[15];
tmp_vec[4] = tmp_vec[16];
tmp_vec[5] = tmp_vec[17];
tmp_vec[6] = tmp_vec[18];
}
}
}
else {
if(order == 10) {
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], qlp_coeff[7]};
int32x4_t qlp_coeff_2 = {qlp_coeff[8], qlp_coeff[9], 0, 0};
tmp_vec[0] = vld1q_s32(data - 10);
tmp_vec[1] = vld1q_s32(data - 9);
tmp_vec[2] = vld1q_s32(data - 8);
tmp_vec[3] = vld1q_s32(data - 7);
tmp_vec[4] = vld1q_s32(data - 6);
tmp_vec[5] = vld1q_s32(data - 5);
for (i = 0; i < (int)data_len - 11; i += 12)
{
int32x4_t summ_0, summ_1, summ_2;
tmp_vec[6] = vld1q_s32(data + i - 4);
tmp_vec[7] = vld1q_s32(data + i - 3);
tmp_vec[8] = vld1q_s32(data + i - 2);
tmp_vec[9] = vld1q_s32(data + i - 1);
tmp_vec[10] = vld1q_s32(data + i - 0);
tmp_vec[11] = vld1q_s32(data + i + 1);
tmp_vec[12] = vld1q_s32(data + i + 2);
tmp_vec[13] = vld1q_s32(data + i + 3);
tmp_vec[14] = vld1q_s32(data + i + 4);
tmp_vec[15] = vld1q_s32(data + i + 5);
tmp_vec[16] = vld1q_s32(data + i + 6);
tmp_vec[17] = vld1q_s32(data + i + 7);
MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_2, 1)
MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_2, 0)
MACC_32BIT_LOOP_UNROOL_3(2, qlp_coeff_1, 3)
MACC_32BIT_LOOP_UNROOL_3(3, qlp_coeff_1, 2)
MACC_32BIT_LOOP_UNROOL_3(4, qlp_coeff_1, 1)
MACC_32BIT_LOOP_UNROOL_3(5, qlp_coeff_1, 0)
MACC_32BIT_LOOP_UNROOL_3(6, qlp_coeff_0, 3)
MACC_32BIT_LOOP_UNROOL_3(7, qlp_coeff_0, 2)
MACC_32BIT_LOOP_UNROOL_3(8, qlp_coeff_0, 1)
MACC_32BIT_LOOP_UNROOL_3(9, qlp_coeff_0, 0)
vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
tmp_vec[0] = tmp_vec[12];
tmp_vec[1] = tmp_vec[13];
tmp_vec[2] = tmp_vec[14];
tmp_vec[3] = tmp_vec[15];
tmp_vec[4] = tmp_vec[16];
tmp_vec[5] = tmp_vec[17];
}
}
else {
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], qlp_coeff[7]};
int32x4_t qlp_coeff_2 = {qlp_coeff[8], 0, 0, 0};
tmp_vec[0] = vld1q_s32(data - 9);
tmp_vec[1] = vld1q_s32(data - 8);
tmp_vec[2] = vld1q_s32(data - 7);
tmp_vec[3] = vld1q_s32(data - 6);
tmp_vec[4] = vld1q_s32(data - 5);
for (i = 0; i < (int)data_len - 11; i += 12)
{
int32x4_t summ_0, summ_1, summ_2;
tmp_vec[5] = vld1q_s32(data + i - 4);
tmp_vec[6] = vld1q_s32(data + i - 3);
tmp_vec[7] = vld1q_s32(data + i - 2);
tmp_vec[8] = vld1q_s32(data + i - 1);
tmp_vec[9] = vld1q_s32(data + i - 0);
tmp_vec[10] = vld1q_s32(data + i + 1);
tmp_vec[11] = vld1q_s32(data + i + 2);
tmp_vec[12] = vld1q_s32(data + i + 3);
tmp_vec[13] = vld1q_s32(data + i + 4);
tmp_vec[14] = vld1q_s32(data + i + 5);
tmp_vec[15] = vld1q_s32(data + i + 6);
tmp_vec[16] = vld1q_s32(data + i + 7);
MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_2, 0)
MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_1, 3)
MACC_32BIT_LOOP_UNROOL_3(2, qlp_coeff_1, 2)
MACC_32BIT_LOOP_UNROOL_3(3, qlp_coeff_1, 1)
MACC_32BIT_LOOP_UNROOL_3(4, qlp_coeff_1, 0)
MACC_32BIT_LOOP_UNROOL_3(5, qlp_coeff_0, 3)
MACC_32BIT_LOOP_UNROOL_3(6, qlp_coeff_0, 2)
MACC_32BIT_LOOP_UNROOL_3(7, qlp_coeff_0, 1)
MACC_32BIT_LOOP_UNROOL_3(8, qlp_coeff_0, 0)
vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
tmp_vec[0] = tmp_vec[12];
tmp_vec[1] = tmp_vec[13];
tmp_vec[2] = tmp_vec[14];
tmp_vec[3] = tmp_vec[15];
tmp_vec[4] = tmp_vec[16];
}
}
}
}
else if(order > 4) {
if(order > 6) {
if(order == 8) {
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], qlp_coeff[7]};
tmp_vec[0] = vld1q_s32(data - 8);
tmp_vec[1] = vld1q_s32(data - 7);
tmp_vec[2] = vld1q_s32(data - 6);
tmp_vec[3] = vld1q_s32(data - 5);
for (i = 0; i < (int)data_len - 11; i += 12)
{
int32x4_t summ_0, summ_1, summ_2;
tmp_vec[4] = vld1q_s32(data + i - 4);
tmp_vec[5] = vld1q_s32(data + i - 3);
tmp_vec[6] = vld1q_s32(data + i - 2);
tmp_vec[7] = vld1q_s32(data + i - 1);
tmp_vec[8] = vld1q_s32(data + i - 0);
tmp_vec[9] = vld1q_s32(data + i + 1);
tmp_vec[10] = vld1q_s32(data + i + 2);
tmp_vec[11] = vld1q_s32(data + i + 3);
tmp_vec[12] = vld1q_s32(data + i + 4);
tmp_vec[13] = vld1q_s32(data + i + 5);
tmp_vec[14] = vld1q_s32(data + i + 6);
tmp_vec[15] = vld1q_s32(data + i + 7);
MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_1, 3)
MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_1, 2)
MACC_32BIT_LOOP_UNROOL_3(2, qlp_coeff_1, 1)
MACC_32BIT_LOOP_UNROOL_3(3, qlp_coeff_1, 0)
MACC_32BIT_LOOP_UNROOL_3(4, qlp_coeff_0, 3)
MACC_32BIT_LOOP_UNROOL_3(5, qlp_coeff_0, 2)
MACC_32BIT_LOOP_UNROOL_3(6, qlp_coeff_0, 1)
MACC_32BIT_LOOP_UNROOL_3(7, qlp_coeff_0, 0)
vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
tmp_vec[0] = tmp_vec[12];
tmp_vec[1] = tmp_vec[13];
tmp_vec[2] = tmp_vec[14];
tmp_vec[3] = tmp_vec[15];
}
}
else {
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], 0};
tmp_vec[0] = vld1q_s32(data - 7);
tmp_vec[1] = vld1q_s32(data - 6);
tmp_vec[2] = vld1q_s32(data - 5);
for (i = 0; i < (int)data_len - 11; i += 12)
{
int32x4_t summ_0, summ_1, summ_2;
tmp_vec[3] = vld1q_s32(data + i - 4);
tmp_vec[4] = vld1q_s32(data + i - 3);
tmp_vec[5] = vld1q_s32(data + i - 2);
tmp_vec[6] = vld1q_s32(data + i - 1);
tmp_vec[7] = vld1q_s32(data + i - 0);
tmp_vec[8] = vld1q_s32(data + i + 1);
tmp_vec[9] = vld1q_s32(data + i + 2);
tmp_vec[10] = vld1q_s32(data + i + 3);
tmp_vec[11] = vld1q_s32(data + i + 4);
tmp_vec[12] = vld1q_s32(data + i + 5);
tmp_vec[13] = vld1q_s32(data + i + 6);
tmp_vec[14] = vld1q_s32(data + i + 7);
MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_1, 2)
MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_1, 1)
MACC_32BIT_LOOP_UNROOL_3(2, qlp_coeff_1, 0)
MACC_32BIT_LOOP_UNROOL_3(3, qlp_coeff_0, 3)
MACC_32BIT_LOOP_UNROOL_3(4, qlp_coeff_0, 2)
MACC_32BIT_LOOP_UNROOL_3(5, qlp_coeff_0, 1)
MACC_32BIT_LOOP_UNROOL_3(6, qlp_coeff_0, 0)
vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
tmp_vec[0] = tmp_vec[12];
tmp_vec[1] = tmp_vec[13];
tmp_vec[2] = tmp_vec[14];
}
}
}
else {
if(order == 6) {
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], 0, 0};
tmp_vec[0] = vld1q_s32(data - 6);
tmp_vec[1] = vld1q_s32(data - 5);
for (i = 0; i < (int)data_len - 11; i += 12)
{
int32x4_t summ_0, summ_1, summ_2;
tmp_vec[2] = vld1q_s32(data + i - 4);
tmp_vec[3] = vld1q_s32(data + i - 3);
tmp_vec[4] = vld1q_s32(data + i - 2);
tmp_vec[5] = vld1q_s32(data + i - 1);
tmp_vec[6] = vld1q_s32(data + i - 0);
tmp_vec[7] = vld1q_s32(data + i + 1);
tmp_vec[8] = vld1q_s32(data + i + 2);
tmp_vec[9] = vld1q_s32(data + i + 3);
tmp_vec[10] = vld1q_s32(data + i + 4);
tmp_vec[11] = vld1q_s32(data + i + 5);
tmp_vec[12] = vld1q_s32(data + i + 6);
tmp_vec[13] = vld1q_s32(data + i + 7);
MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_1, 1)
MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_1, 0)
MACC_32BIT_LOOP_UNROOL_3(2, qlp_coeff_0, 3)
MACC_32BIT_LOOP_UNROOL_3(3, qlp_coeff_0, 2)
MACC_32BIT_LOOP_UNROOL_3(4, qlp_coeff_0, 1)
MACC_32BIT_LOOP_UNROOL_3(5, qlp_coeff_0, 0)
vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
tmp_vec[0] = tmp_vec[12];
tmp_vec[1] = tmp_vec[13];
}
}
else {
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
int32x4_t qlp_coeff_1 = {qlp_coeff[4], 0, 0, 0};
tmp_vec[0] = vld1q_s32(data - 5);
for (i = 0; i < (int)data_len - 11; i += 12)
{
int32x4_t summ_0, summ_1, summ_2;
tmp_vec[1] = vld1q_s32(data + i - 4);
tmp_vec[2] = vld1q_s32(data + i - 3);
tmp_vec[3] = vld1q_s32(data + i - 2);
tmp_vec[4] = vld1q_s32(data + i - 1);
tmp_vec[5] = vld1q_s32(data + i - 0);
tmp_vec[6] = vld1q_s32(data + i + 1);
tmp_vec[7] = vld1q_s32(data + i + 2);
tmp_vec[8] = vld1q_s32(data + i + 3);
tmp_vec[9] = vld1q_s32(data + i + 4);
tmp_vec[10] = vld1q_s32(data + i + 5);
tmp_vec[11] = vld1q_s32(data + i + 6);
tmp_vec[12] = vld1q_s32(data + i + 7);
MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_1, 0)
MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_0, 3)
MACC_32BIT_LOOP_UNROOL_3(2, qlp_coeff_0, 2)
MACC_32BIT_LOOP_UNROOL_3(3, qlp_coeff_0, 1)
MACC_32BIT_LOOP_UNROOL_3(4, qlp_coeff_0, 0)
vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
tmp_vec[0] = tmp_vec[12];
}
}
}
}
else {
if(order > 2) {
if(order == 4) {
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
for (i = 0; i < (int)data_len - 11; i += 12)
{
int32x4_t summ_0, summ_1, summ_2;
tmp_vec[0] = vld1q_s32(data + i - 4);
tmp_vec[1] = vld1q_s32(data + i - 3);
tmp_vec[2] = vld1q_s32(data + i - 2);
tmp_vec[3] = vld1q_s32(data + i - 1);
tmp_vec[4] = vld1q_s32(data + i - 0);
tmp_vec[5] = vld1q_s32(data + i + 1);
tmp_vec[6] = vld1q_s32(data + i + 2);
tmp_vec[7] = vld1q_s32(data + i + 3);
tmp_vec[8] = vld1q_s32(data + i + 4);
tmp_vec[9] = vld1q_s32(data + i + 5);
tmp_vec[10] = vld1q_s32(data + i + 6);
tmp_vec[11] = vld1q_s32(data + i + 7);
MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_0, 3)
MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_0, 2)
MACC_32BIT_LOOP_UNROOL_3(2, qlp_coeff_0, 1)
MACC_32BIT_LOOP_UNROOL_3(3, qlp_coeff_0, 0)
vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
}
}
else {
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], 0};
for (i = 0; i < (int)data_len - 11; i += 12)
{
int32x4_t summ_0, summ_1, summ_2;
tmp_vec[0] = vld1q_s32(data + i - 3);
tmp_vec[1] = vld1q_s32(data + i - 2);
tmp_vec[2] = vld1q_s32(data + i - 1);
tmp_vec[4] = vld1q_s32(data + i + 1);
tmp_vec[5] = vld1q_s32(data + i + 2);
tmp_vec[6] = vld1q_s32(data + i + 3);
tmp_vec[8] = vld1q_s32(data + i + 5);
tmp_vec[9] = vld1q_s32(data + i + 6);
tmp_vec[10] = vld1q_s32(data + i + 7);
MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_0, 2)
MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_0, 1)
MACC_32BIT_LOOP_UNROOL_3(2, qlp_coeff_0, 0)
vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
}
}
}
else {
if(order == 2) {
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], 0, 0};
for (i = 0; i < (int)data_len - 11; i += 12)
{
int32x4_t summ_0, summ_1, summ_2;
tmp_vec[0] = vld1q_s32(data + i - 2);
tmp_vec[1] = vld1q_s32(data + i - 1);
tmp_vec[4] = vld1q_s32(data + i + 2);
tmp_vec[5] = vld1q_s32(data + i + 3);
tmp_vec[8] = vld1q_s32(data + i + 6);
tmp_vec[9] = vld1q_s32(data + i + 7);
MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_0, 1)
MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_0, 0)
vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
}
}
else {
int32x4_t qlp_coeff_0 = vdupq_n_s32(qlp_coeff[0]);
for (i = 0; i < (int)data_len - 11; i += 12)
{
int32x4_t summ_0, summ_1, summ_2;
tmp_vec[0] = vld1q_s32(data + i - 1);
tmp_vec[4] = vld1q_s32(data + i + 3);
tmp_vec[8] = vld1q_s32(data + i + 7);
summ_0 = vmulq_s32(tmp_vec[0], qlp_coeff_0);
summ_1 = vmulq_s32(tmp_vec[4], qlp_coeff_0);
summ_2 = vmulq_s32(tmp_vec[8], qlp_coeff_0);
vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
}
}
}
}
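/* Scalar tail: handle the remaining samples (fewer than 12, or all of them
   when data_len < 12). The switch counts on intentional case fall-through
   to add all `order` taps. */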
for(; i < (int)data_len; i++) {
sum = 0;
switch(order) {
case 12: sum += qlp_coeff[11] * data[i-12];
case 11: sum += qlp_coeff[10] * data[i-11];
case 10: sum += qlp_coeff[ 9] * data[i-10];
case 9: sum += qlp_coeff[ 8] * data[i- 9];
case 8: sum += qlp_coeff[ 7] * data[i- 8];
case 7: sum += qlp_coeff[ 6] * data[i- 7];
case 6: sum += qlp_coeff[ 5] * data[i- 6];
case 5: sum += qlp_coeff[ 4] * data[i- 5];
case 4: sum += qlp_coeff[ 3] * data[i- 4];
case 3: sum += qlp_coeff[ 2] * data[i- 3];
case 2: sum += qlp_coeff[ 1] * data[i- 2];
case 1: sum += qlp_coeff[ 0] * data[i- 1];
}
residual[i] = data[i] - (sum >> lp_quantization);
}
}
else {
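/* Orders 13..32: plain scalar code, again relying on case fall-through. */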
for(i = 0; i < (int)data_len; i++) {
sum = 0;
switch(order) {
case 32: sum += qlp_coeff[31] * data[i-32];
case 31: sum += qlp_coeff[30] * data[i-31];
case 30: sum += qlp_coeff[29] * data[i-30];
case 29: sum += qlp_coeff[28] * data[i-29];
case 28: sum += qlp_coeff[27] * data[i-28];
case 27: sum += qlp_coeff[26] * data[i-27];
case 26: sum += qlp_coeff[25] * data[i-26];
case 25: sum += qlp_coeff[24] * data[i-25];
case 24: sum += qlp_coeff[23] * data[i-24];
case 23: sum += qlp_coeff[22] * data[i-23];
case 22: sum += qlp_coeff[21] * data[i-22];
case 21: sum += qlp_coeff[20] * data[i-21];
case 20: sum += qlp_coeff[19] * data[i-20];
case 19: sum += qlp_coeff[18] * data[i-19];
case 18: sum += qlp_coeff[17] * data[i-18];
case 17: sum += qlp_coeff[16] * data[i-17];
case 16: sum += qlp_coeff[15] * data[i-16];
case 15: sum += qlp_coeff[14] * data[i-15];
case 14: sum += qlp_coeff[13] * data[i-14];
case 13: sum += qlp_coeff[12] * data[i-13];
sum += qlp_coeff[11] * data[i-12];
sum += qlp_coeff[10] * data[i-11];
sum += qlp_coeff[ 9] * data[i-10];
sum += qlp_coeff[ 8] * data[i- 9];
sum += qlp_coeff[ 7] * data[i- 8];
sum += qlp_coeff[ 6] * data[i- 7];
sum += qlp_coeff[ 5] * data[i- 6];
sum += qlp_coeff[ 4] * data[i- 5];
sum += qlp_coeff[ 3] * data[i- 4];
sum += qlp_coeff[ 2] * data[i- 3];
sum += qlp_coeff[ 1] * data[i- 2];
sum += qlp_coeff[ 0] * data[i- 1];
}
residual[i] = data[i] - (sum >> lp_quantization);
}
}
}
#define MUL_64_BIT_LOOP_UNROOL_3 …
#define MACC_64_BIT_LOOP_UNROOL_3 …
#define SHIFT_SUMS_64BITS_AND_STORE_SUB …
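/*
 * Wide variant: same structure as the function above, but products are
 * accumulated in 64-bit lanes (summ_l_* / summ_h_* pairs) so high-precision
 * coefficients and large samples cannot overflow 32 bits before the
 * quantization shift. The macro bodies are elided here; from their use they
 * appear to widen-multiply(-accumulate) one coefficient lane against the low
 * and high halves of three tmp_vec vectors, and the SHIFT/STORE macro
 * presumably shifts by lp_quantization_vec, narrows through res0/res1/res2,
 * and stores the residuals.
 */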
void FLAC__lpc_compute_residual_from_qlp_coefficients_wide_intrin_neon(const FLAC__int32 *data, uint32_t data_len, const FLAC__int32 qlp_coeff[], uint32_t order, int lp_quantization, FLAC__int32 residual[]) {
int i;
FLAC__int64 sum;
int32x4_t tmp_vec[20];
int32x4_t res0, res1, res2;
int64x2_t lp_quantization_vec = vdupq_n_s64(-lp_quantization);
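/* The shift count is splatted negated: shifting left by a negative amount
   (as the elided store macro appears to do) yields the arithmetic right
   shift by lp_quantization. */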
FLAC__ASSERT(order > 0);
FLAC__ASSERT(order <= 32);
if(order <= 12) {
if(order > 8) {
if(order > 10) {
if(order == 12) {
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], qlp_coeff[7]};
int32x4_t qlp_coeff_2 = {qlp_coeff[8], qlp_coeff[9], qlp_coeff[10], qlp_coeff[11]};
tmp_vec[0] = vld1q_s32(data - 12);
tmp_vec[1] = vld1q_s32(data - 11);
tmp_vec[2] = vld1q_s32(data - 10);
tmp_vec[3] = vld1q_s32(data - 9);
tmp_vec[4] = vld1q_s32(data - 8);
tmp_vec[5] = vld1q_s32(data - 7);
tmp_vec[6] = vld1q_s32(data - 6);
tmp_vec[7] = vld1q_s32(data - 5);
for (i = 0; i < (int)data_len - 11; i += 12)
{
int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
tmp_vec[8] = vld1q_s32(data + i - 4);
tmp_vec[9] = vld1q_s32(data + i - 3);
tmp_vec[10] = vld1q_s32(data + i - 2);
tmp_vec[11] = vld1q_s32(data + i - 1);
tmp_vec[12] = vld1q_s32(data + i);
tmp_vec[13] = vld1q_s32(data + i + 1);
tmp_vec[14] = vld1q_s32(data + i + 2);
tmp_vec[15] = vld1q_s32(data + i + 3);
tmp_vec[16] = vld1q_s32(data + i + 4);
tmp_vec[17] = vld1q_s32(data + i + 5);
tmp_vec[18] = vld1q_s32(data + i + 6);
tmp_vec[19] = vld1q_s32(data + i + 7);
MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_2, 3)
MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_2, 2)
MACC_64_BIT_LOOP_UNROOL_3(2, qlp_coeff_2, 1)
MACC_64_BIT_LOOP_UNROOL_3(3, qlp_coeff_2, 0)
MACC_64_BIT_LOOP_UNROOL_3(4, qlp_coeff_1, 3)
MACC_64_BIT_LOOP_UNROOL_3(5, qlp_coeff_1, 2)
MACC_64_BIT_LOOP_UNROOL_3(6, qlp_coeff_1, 1)
MACC_64_BIT_LOOP_UNROOL_3(7, qlp_coeff_1, 0)
MACC_64_BIT_LOOP_UNROOL_3(8, qlp_coeff_0, 3)
MACC_64_BIT_LOOP_UNROOL_3(9, qlp_coeff_0, 2)
MACC_64_BIT_LOOP_UNROOL_3(10, qlp_coeff_0, 1)
MACC_64_BIT_LOOP_UNROOL_3(11, qlp_coeff_0, 0)
SHIFT_SUMS_64BITS_AND_STORE_SUB()
tmp_vec[0] = tmp_vec[12];
tmp_vec[1] = tmp_vec[13];
tmp_vec[2] = tmp_vec[14];
tmp_vec[3] = tmp_vec[15];
tmp_vec[4] = tmp_vec[16];
tmp_vec[5] = tmp_vec[17];
tmp_vec[6] = tmp_vec[18];
tmp_vec[7] = tmp_vec[19];
}
}
else {
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], qlp_coeff[7]};
int32x4_t qlp_coeff_2 = {qlp_coeff[8], qlp_coeff[9], qlp_coeff[10], 0};
tmp_vec[0] = vld1q_s32(data - 11);
tmp_vec[1] = vld1q_s32(data - 10);
tmp_vec[2] = vld1q_s32(data - 9);
tmp_vec[3] = vld1q_s32(data - 8);
tmp_vec[4] = vld1q_s32(data - 7);
tmp_vec[5] = vld1q_s32(data - 6);
tmp_vec[6] = vld1q_s32(data - 5);
for (i = 0; i < (int)data_len - 11; i += 12)
{
int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
tmp_vec[7] = vld1q_s32(data + i - 4);
tmp_vec[8] = vld1q_s32(data + i - 3);
tmp_vec[9] = vld1q_s32(data + i - 2);
tmp_vec[10] = vld1q_s32(data + i - 1);
tmp_vec[11] = vld1q_s32(data + i);
tmp_vec[12] = vld1q_s32(data + i + 1);
tmp_vec[13] = vld1q_s32(data + i + 2);
tmp_vec[14] = vld1q_s32(data + i + 3);
tmp_vec[15] = vld1q_s32(data + i + 4);
tmp_vec[16] = vld1q_s32(data + i + 5);
tmp_vec[17] = vld1q_s32(data + i + 6);
tmp_vec[18] = vld1q_s32(data + i + 7);
MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_2, 2)
MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_2, 1)
MACC_64_BIT_LOOP_UNROOL_3(2, qlp_coeff_2, 0)
MACC_64_BIT_LOOP_UNROOL_3(3, qlp_coeff_1, 3)
MACC_64_BIT_LOOP_UNROOL_3(4, qlp_coeff_1, 2)
MACC_64_BIT_LOOP_UNROOL_3(5, qlp_coeff_1, 1)
MACC_64_BIT_LOOP_UNROOL_3(6, qlp_coeff_1, 0)
MACC_64_BIT_LOOP_UNROOL_3(7, qlp_coeff_0, 3)
MACC_64_BIT_LOOP_UNROOL_3(8, qlp_coeff_0, 2)
MACC_64_BIT_LOOP_UNROOL_3(9, qlp_coeff_0, 1)
MACC_64_BIT_LOOP_UNROOL_3(10, qlp_coeff_0, 0)
SHIFT_SUMS_64BITS_AND_STORE_SUB()
tmp_vec[0] = tmp_vec[12];
tmp_vec[1] = tmp_vec[13];
tmp_vec[2] = tmp_vec[14];
tmp_vec[3] = tmp_vec[15];
tmp_vec[4] = tmp_vec[16];
tmp_vec[5] = tmp_vec[17];
tmp_vec[6] = tmp_vec[18];
}
}
}
else
{
if (order == 10) {
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], qlp_coeff[7]};
int32x4_t qlp_coeff_2 = {qlp_coeff[8], qlp_coeff[9], 0, 0};
tmp_vec[0] = vld1q_s32(data - 10);
tmp_vec[1] = vld1q_s32(data - 9);
tmp_vec[2] = vld1q_s32(data - 8);
tmp_vec[3] = vld1q_s32(data - 7);
tmp_vec[4] = vld1q_s32(data - 6);
tmp_vec[5] = vld1q_s32(data - 5);
for (i = 0; i < (int)data_len - 11; i += 12)
{
int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
tmp_vec[6] = vld1q_s32(data + i - 4);
tmp_vec[7] = vld1q_s32(data + i - 3);
tmp_vec[8] = vld1q_s32(data + i - 2);
tmp_vec[9] = vld1q_s32(data + i - 1);
tmp_vec[10] = vld1q_s32(data + i - 0);
tmp_vec[11] = vld1q_s32(data + i + 1);
tmp_vec[12] = vld1q_s32(data + i + 2);
tmp_vec[13] = vld1q_s32(data + i + 3);
tmp_vec[14] = vld1q_s32(data + i + 4);
tmp_vec[15] = vld1q_s32(data + i + 5);
tmp_vec[16] = vld1q_s32(data + i + 6);
tmp_vec[17] = vld1q_s32(data + i + 7);
MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_2, 1)
MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_2, 0)
MACC_64_BIT_LOOP_UNROOL_3(2, qlp_coeff_1, 3)
MACC_64_BIT_LOOP_UNROOL_3(3, qlp_coeff_1, 2)
MACC_64_BIT_LOOP_UNROOL_3(4, qlp_coeff_1, 1)
MACC_64_BIT_LOOP_UNROOL_3(5, qlp_coeff_1, 0)
MACC_64_BIT_LOOP_UNROOL_3(6, qlp_coeff_0, 3)
MACC_64_BIT_LOOP_UNROOL_3(7, qlp_coeff_0, 2)
MACC_64_BIT_LOOP_UNROOL_3(8, qlp_coeff_0, 1)
MACC_64_BIT_LOOP_UNROOL_3(9, qlp_coeff_0, 0)
SHIFT_SUMS_64BITS_AND_STORE_SUB()
tmp_vec[0] = tmp_vec[12];
tmp_vec[1] = tmp_vec[13];
tmp_vec[2] = tmp_vec[14];
tmp_vec[3] = tmp_vec[15];
tmp_vec[4] = tmp_vec[16];
tmp_vec[5] = tmp_vec[17];
}
}
else {
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], qlp_coeff[7]};
int32x4_t qlp_coeff_2 = {qlp_coeff[8], 0, 0, 0};
tmp_vec[0] = vld1q_s32(data - 9);
tmp_vec[1] = vld1q_s32(data - 8);
tmp_vec[2] = vld1q_s32(data - 7);
tmp_vec[3] = vld1q_s32(data - 6);
tmp_vec[4] = vld1q_s32(data - 5);
for (i = 0; i < (int)data_len - 11; i += 12)
{
int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
tmp_vec[5] = vld1q_s32(data + i - 4);
tmp_vec[6] = vld1q_s32(data + i - 3);
tmp_vec[7] = vld1q_s32(data + i - 2);
tmp_vec[8] = vld1q_s32(data + i - 1);
tmp_vec[9] = vld1q_s32(data + i - 0);
tmp_vec[10] = vld1q_s32(data + i + 1);
tmp_vec[11] = vld1q_s32(data + i + 2);
tmp_vec[12] = vld1q_s32(data + i + 3);
tmp_vec[13] = vld1q_s32(data + i + 4);
tmp_vec[14] = vld1q_s32(data + i + 5);
tmp_vec[15] = vld1q_s32(data + i + 6);
tmp_vec[16] = vld1q_s32(data + i + 7);
MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_2, 0)
MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_1, 3)
MACC_64_BIT_LOOP_UNROOL_3(2, qlp_coeff_1, 2)
MACC_64_BIT_LOOP_UNROOL_3(3, qlp_coeff_1, 1)
MACC_64_BIT_LOOP_UNROOL_3(4, qlp_coeff_1, 0)
MACC_64_BIT_LOOP_UNROOL_3(5, qlp_coeff_0, 3)
MACC_64_BIT_LOOP_UNROOL_3(6, qlp_coeff_0, 2)
MACC_64_BIT_LOOP_UNROOL_3(7, qlp_coeff_0, 1)
MACC_64_BIT_LOOP_UNROOL_3(8, qlp_coeff_0, 0)
SHIFT_SUMS_64BITS_AND_STORE_SUB()
tmp_vec[0] = tmp_vec[12];
tmp_vec[1] = tmp_vec[13];
tmp_vec[2] = tmp_vec[14];
tmp_vec[3] = tmp_vec[15];
tmp_vec[4] = tmp_vec[16];
}
}
}
}
else if (order > 4)
{
if (order > 6)
{
if (order == 8)
{
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], qlp_coeff[7]};
tmp_vec[0] = vld1q_s32(data - 8);
tmp_vec[1] = vld1q_s32(data - 7);
tmp_vec[2] = vld1q_s32(data - 6);
tmp_vec[3] = vld1q_s32(data - 5);
for (i = 0; i < (int)data_len - 11; i += 12)
{
int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
tmp_vec[4] = vld1q_s32(data + i - 4);
tmp_vec[5] = vld1q_s32(data + i - 3);
tmp_vec[6] = vld1q_s32(data + i - 2);
tmp_vec[7] = vld1q_s32(data + i - 1);
tmp_vec[8] = vld1q_s32(data + i - 0);
tmp_vec[9] = vld1q_s32(data + i + 1);
tmp_vec[10] = vld1q_s32(data + i + 2);
tmp_vec[11] = vld1q_s32(data + i + 3);
tmp_vec[12] = vld1q_s32(data + i + 4);
tmp_vec[13] = vld1q_s32(data + i + 5);
tmp_vec[14] = vld1q_s32(data + i + 6);
tmp_vec[15] = vld1q_s32(data + i + 7);
MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_1, 3)
MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_1, 2)
MACC_64_BIT_LOOP_UNROOL_3(2, qlp_coeff_1, 1)
MACC_64_BIT_LOOP_UNROOL_3(3, qlp_coeff_1, 0)
MACC_64_BIT_LOOP_UNROOL_3(4, qlp_coeff_0, 3)
MACC_64_BIT_LOOP_UNROOL_3(5, qlp_coeff_0, 2)
MACC_64_BIT_LOOP_UNROOL_3(6, qlp_coeff_0, 1)
MACC_64_BIT_LOOP_UNROOL_3(7, qlp_coeff_0, 0)
SHIFT_SUMS_64BITS_AND_STORE_SUB()
tmp_vec[0] = tmp_vec[12];
tmp_vec[1] = tmp_vec[13];
tmp_vec[2] = tmp_vec[14];
tmp_vec[3] = tmp_vec[15];
}
}
else
{
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], 0};
tmp_vec[0] = vld1q_s32(data - 7);
tmp_vec[1] = vld1q_s32(data - 6);
tmp_vec[2] = vld1q_s32(data - 5);
for (i = 0; i < (int)data_len - 11; i += 12)
{
int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
tmp_vec[3] = vld1q_s32(data + i - 4);
tmp_vec[4] = vld1q_s32(data + i - 3);
tmp_vec[5] = vld1q_s32(data + i - 2);
tmp_vec[6] = vld1q_s32(data + i - 1);
tmp_vec[7] = vld1q_s32(data + i - 0);
tmp_vec[8] = vld1q_s32(data + i + 1);
tmp_vec[9] = vld1q_s32(data + i + 2);
tmp_vec[10] = vld1q_s32(data + i + 3);
tmp_vec[11] = vld1q_s32(data + i + 4);
tmp_vec[12] = vld1q_s32(data + i + 5);
tmp_vec[13] = vld1q_s32(data + i + 6);
tmp_vec[14] = vld1q_s32(data + i + 7);
MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_1, 2)
MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_1, 1)
MACC_64_BIT_LOOP_UNROOL_3(2, qlp_coeff_1, 0)
MACC_64_BIT_LOOP_UNROOL_3(3, qlp_coeff_0, 3)
MACC_64_BIT_LOOP_UNROOL_3(4, qlp_coeff_0, 2)
MACC_64_BIT_LOOP_UNROOL_3(5, qlp_coeff_0, 1)
MACC_64_BIT_LOOP_UNROOL_3(6, qlp_coeff_0, 0)
SHIFT_SUMS_64BITS_AND_STORE_SUB()
tmp_vec[0] = tmp_vec[12];
tmp_vec[1] = tmp_vec[13];
tmp_vec[2] = tmp_vec[14];
}
}
}
else
{
if (order == 6) {
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], 0, 0};
tmp_vec[0] = vld1q_s32(data - 6);
tmp_vec[1] = vld1q_s32(data - 5);
for (i = 0; i < (int)data_len - 11; i += 12)
{
int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
tmp_vec[2] = vld1q_s32(data + i - 4);
tmp_vec[3] = vld1q_s32(data + i - 3);
tmp_vec[4] = vld1q_s32(data + i - 2);
tmp_vec[5] = vld1q_s32(data + i - 1);
tmp_vec[6] = vld1q_s32(data + i - 0);
tmp_vec[7] = vld1q_s32(data + i + 1);
tmp_vec[8] = vld1q_s32(data + i + 2);
tmp_vec[9] = vld1q_s32(data + i + 3);
tmp_vec[10] = vld1q_s32(data + i + 4);
tmp_vec[11] = vld1q_s32(data + i + 5);
tmp_vec[12] = vld1q_s32(data + i + 6);
tmp_vec[13] = vld1q_s32(data + i + 7);
MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_1, 1)
MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_1, 0)
MACC_64_BIT_LOOP_UNROOL_3(2, qlp_coeff_0, 3)
MACC_64_BIT_LOOP_UNROOL_3(3, qlp_coeff_0, 2)
MACC_64_BIT_LOOP_UNROOL_3(4, qlp_coeff_0, 1)
MACC_64_BIT_LOOP_UNROOL_3(5, qlp_coeff_0, 0)
SHIFT_SUMS_64BITS_AND_STORE_SUB()
tmp_vec[0] = tmp_vec[12];
tmp_vec[1] = tmp_vec[13];
}
}
else
{
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
int32x4_t qlp_coeff_1 = {qlp_coeff[4], 0, 0, 0};
tmp_vec[0] = vld1q_s32(data - 5);
for (i = 0; i < (int)data_len - 11; i += 12)
{
int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
tmp_vec[1] = vld1q_s32(data + i - 4);
tmp_vec[2] = vld1q_s32(data + i - 3);
tmp_vec[3] = vld1q_s32(data + i - 2);
tmp_vec[4] = vld1q_s32(data + i - 1);
tmp_vec[5] = vld1q_s32(data + i - 0);
tmp_vec[6] = vld1q_s32(data + i + 1);
tmp_vec[7] = vld1q_s32(data + i + 2);
tmp_vec[8] = vld1q_s32(data + i + 3);
tmp_vec[9] = vld1q_s32(data + i + 4);
tmp_vec[10] = vld1q_s32(data + i + 5);
tmp_vec[11] = vld1q_s32(data + i + 6);
tmp_vec[12] = vld1q_s32(data + i + 7);
MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_1, 0)
MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_0, 3)
MACC_64_BIT_LOOP_UNROOL_3(2, qlp_coeff_0, 2)
MACC_64_BIT_LOOP_UNROOL_3(3, qlp_coeff_0, 1)
MACC_64_BIT_LOOP_UNROOL_3(4, qlp_coeff_0, 0)
SHIFT_SUMS_64BITS_AND_STORE_SUB()
tmp_vec[0] = tmp_vec[12];
}
}
}
}
else
{
if (order > 2)
{
if (order == 4)
{
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
for (i = 0; i < (int)data_len - 11; i += 12)
{
int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
tmp_vec[0] = vld1q_s32(data + i - 4);
tmp_vec[1] = vld1q_s32(data + i - 3);
tmp_vec[2] = vld1q_s32(data + i - 2);
tmp_vec[3] = vld1q_s32(data + i - 1);
tmp_vec[4] = vld1q_s32(data + i - 0);
tmp_vec[5] = vld1q_s32(data + i + 1);
tmp_vec[6] = vld1q_s32(data + i + 2);
tmp_vec[7] = vld1q_s32(data + i + 3);
tmp_vec[8] = vld1q_s32(data + i + 4);
tmp_vec[9] = vld1q_s32(data + i + 5);
tmp_vec[10] = vld1q_s32(data + i + 6);
tmp_vec[11] = vld1q_s32(data + i + 7);
MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_0, 3)
MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_0, 2)
MACC_64_BIT_LOOP_UNROOL_3(2, qlp_coeff_0, 1)
MACC_64_BIT_LOOP_UNROOL_3(3, qlp_coeff_0, 0)
SHIFT_SUMS_64BITS_AND_STORE_SUB()
}
}
else
{
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], 0};
for (i = 0; i < (int)data_len - 11; i += 12)
{
int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
tmp_vec[0] = vld1q_s32(data + i - 3);
tmp_vec[1] = vld1q_s32(data + i - 2);
tmp_vec[2] = vld1q_s32(data + i - 1);
tmp_vec[4] = vld1q_s32(data + i + 1);
tmp_vec[5] = vld1q_s32(data + i + 2);
tmp_vec[6] = vld1q_s32(data + i + 3);
tmp_vec[8] = vld1q_s32(data + i + 5);
tmp_vec[9] = vld1q_s32(data + i + 6);
tmp_vec[10] = vld1q_s32(data + i + 7);
MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_0, 2)
MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_0, 1)
MACC_64_BIT_LOOP_UNROOL_3(2, qlp_coeff_0, 0)
SHIFT_SUMS_64BITS_AND_STORE_SUB()
}
}
}
else
{
if (order == 2)
{
int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], 0, 0};
for (i = 0; i < (int)data_len - 11; i += 12)
{
int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
tmp_vec[0] = vld1q_s32(data + i - 2);
tmp_vec[1] = vld1q_s32(data + i - 1);
tmp_vec[4] = vld1q_s32(data + i + 2);
tmp_vec[5] = vld1q_s32(data + i + 3);
tmp_vec[8] = vld1q_s32(data + i + 6);
tmp_vec[9] = vld1q_s32(data + i + 7);
MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_0, 1)
MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_0, 0)
SHIFT_SUMS_64BITS_AND_STORE_SUB()
}
}
else
{
int32x2_t qlp_coeff_0_2 = vdup_n_s32(qlp_coeff[0]);
int32x4_t qlp_coeff_0_4 = vdupq_n_s32(qlp_coeff[0]);
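/* Two copies of the single coefficient: a 64-bit vector for vmull_s32 on the
   low halves and a 128-bit vector for vmull_high_s32 on the high halves. */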
for (i = 0; i < (int)data_len - 11; i += 12)
{
int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
tmp_vec[0] = vld1q_s32(data + i - 1);
tmp_vec[4] = vld1q_s32(data + i + 3);
tmp_vec[8] = vld1q_s32(data + i + 7);
summ_l_0 = vmull_s32(vget_low_s32(tmp_vec[0]), qlp_coeff_0_2);
summ_h_0 = vmull_high_s32(tmp_vec[0], qlp_coeff_0_4);
summ_l_1 = vmull_s32(vget_low_s32(tmp_vec[4]), qlp_coeff_0_2);
summ_h_1 = vmull_high_s32(tmp_vec[4], qlp_coeff_0_4);
summ_l_2 = vmull_s32(vget_low_s32(tmp_vec[8]), qlp_coeff_0_2);
summ_h_2 = vmull_high_s32(tmp_vec[8], qlp_coeff_0_4);
SHIFT_SUMS_64BITS_AND_STORE_SUB()
}
}
}
}
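/* Scalar tail for the wide path, promoting each product to FLAC__int64. */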
for (; i < (int)data_len; i++)
{
sum = 0;
switch (order)
{
case 12:
sum += qlp_coeff[11] * (FLAC__int64)data[i - 12];
case 11:
sum += qlp_coeff[10] * (FLAC__int64)data[i - 11];
case 10:
sum += qlp_coeff[9] * (FLAC__int64)data[i - 10];
case 9:
sum += qlp_coeff[8] * (FLAC__int64)data[i - 9];
case 8:
sum += qlp_coeff[7] * (FLAC__int64)data[i - 8];
case 7:
sum += qlp_coeff[6] * (FLAC__int64)data[i - 7];
case 6:
sum += qlp_coeff[5] * (FLAC__int64)data[i - 6];
case 5:
sum += qlp_coeff[4] * (FLAC__int64)data[i - 5];
case 4:
sum += qlp_coeff[3] * (FLAC__int64)data[i - 4];
case 3:
sum += qlp_coeff[2] * (FLAC__int64)data[i - 3];
case 2:
sum += qlp_coeff[1] * (FLAC__int64)data[i - 2];
case 1:
sum += qlp_coeff[0] * (FLAC__int64)data[i - 1];
}
residual[i] = data[i] - (sum >> lp_quantization);
}
}
else
{
for (i = 0; i < (int)data_len; i++)
{
sum = 0;
switch (order)
{
case 32:
sum += qlp_coeff[31] * (FLAC__int64)data[i - 32];
case 31:
sum += qlp_coeff[30] * (FLAC__int64)data[i - 31];
case 30:
sum += qlp_coeff[29] * (FLAC__int64)data[i - 30];
case 29:
sum += qlp_coeff[28] * (FLAC__int64)data[i - 29];
case 28:
sum += qlp_coeff[27] * (FLAC__int64)data[i - 28];
case 27:
sum += qlp_coeff[26] * (FLAC__int64)data[i - 27];
case 26:
sum += qlp_coeff[25] * (FLAC__int64)data[i - 26];
case 25:
sum += qlp_coeff[24] * (FLAC__int64)data[i - 25];
case 24:
sum += qlp_coeff[23] * (FLAC__int64)data[i - 24];
case 23:
sum += qlp_coeff[22] * (FLAC__int64)data[i - 23];
case 22:
sum += qlp_coeff[21] * (FLAC__int64)data[i - 22];
case 21:
sum += qlp_coeff[20] * (FLAC__int64)data[i - 21];
case 20:
sum += qlp_coeff[19] * (FLAC__int64)data[i - 20];
case 19:
sum += qlp_coeff[18] * (FLAC__int64)data[i - 19];
case 18:
sum += qlp_coeff[17] * (FLAC__int64)data[i - 18];
case 17:
sum += qlp_coeff[16] * (FLAC__int64)data[i - 17];
case 16:
sum += qlp_coeff[15] * (FLAC__int64)data[i - 16];
case 15:
sum += qlp_coeff[14] * (FLAC__int64)data[i - 15];
case 14:
sum += qlp_coeff[13] * (FLAC__int64)data[i - 14];
case 13:
sum += qlp_coeff[12] * (FLAC__int64)data[i - 13];
sum += qlp_coeff[11] * (FLAC__int64)data[i - 12];
sum += qlp_coeff[10] * (FLAC__int64)data[i - 11];
sum += qlp_coeff[9] * (FLAC__int64)data[i - 10];
sum += qlp_coeff[8] * (FLAC__int64)data[i - 9];
sum += qlp_coeff[7] * (FLAC__int64)data[i - 8];
sum += qlp_coeff[6] * (FLAC__int64)data[i - 7];
sum += qlp_coeff[5] * (FLAC__int64)data[i - 6];
sum += qlp_coeff[4] * (FLAC__int64)data[i - 5];
sum += qlp_coeff[3] * (FLAC__int64)data[i - 4];
sum += qlp_coeff[2] * (FLAC__int64)data[i - 3];
sum += qlp_coeff[1] * (FLAC__int64)data[i - 2];
sum += qlp_coeff[0] * (FLAC__int64)data[i - 1];
}
residual[i] = data[i] - (sum >> lp_quantization);
}
}
return;
}
#endif
#endif
#endif