#include <immintrin.h>
#include "config/aom_dsp_rtcd.h"
#include "aom_dsp/x86/masked_variance_intrin_ssse3.h"
#include "aom_dsp/x86/synonyms.h"
static inline __m128i mm256_add_hi_lo_epi16(const __m256i val) { … }
static inline __m128i mm256_add_hi_lo_epi32(const __m256i val) { … }
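// The two helpers above reduce a 256-bit accumulator to 128 bits by adding
// its high and low lanes; a minimal sketch of the 16-bit flavor (assumed
// shape, since the bodies are elided):
//   return _mm_add_epi16(_mm256_castsi256_si128(val),
//                        _mm256_extracti128_si256(val, 1));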
static inline void variance_kernel_avx2(const __m256i src, const __m256i ref,
__m256i *const sse,
__m256i *const sum) { … }
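// variance_kernel_avx2() accumulates the running sum of differences and the
// sum of squared differences for 32 src/ref byte pairs. A common way to
// write such a kernel is sketched below; example_variance_kernel_avx2 is a
// hypothetical illustration, not necessarily the elided body.
static inline void example_variance_kernel_avx2(const __m256i src,
                                                const __m256i ref,
                                                __m256i *const sse,
                                                __m256i *const sum) {
  // Per-pair weights (+1, -1) for interleaved (src, ref) bytes: maddubs then
  // yields the 16-bit differences src - ref.
  const __m256i adj_sub = _mm256_set1_epi16((short)0xff01);
  const __m256i src_ref0 = _mm256_unpacklo_epi8(src, ref);
  const __m256i src_ref1 = _mm256_unpackhi_epi8(src, ref);
  const __m256i diff0 = _mm256_maddubs_epi16(src_ref0, adj_sub);
  const __m256i diff1 = _mm256_maddubs_epi16(src_ref1, adj_sub);
  // madd squares the differences and pairwise-adds them into 32-bit lanes.
  const __m256i madd0 = _mm256_madd_epi16(diff0, diff0);
  const __m256i madd1 = _mm256_madd_epi16(diff1, diff1);
  // Note the 16-bit sum accumulation; its capacity bounds how many pixels a
  // block may contain before the sums must be widened to 32 bits.
  *sum = _mm256_add_epi16(*sum, _mm256_add_epi16(diff0, diff1));
  *sse = _mm256_add_epi32(*sse, _mm256_add_epi32(madd0, madd1));
}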
static inline int variance_final_from_32bit_sum_avx2(__m256i vsse, __m128i vsum,
unsigned int *const sse) { … }
static inline int variance_final_512_avx2(__m256i vsse, __m256i vsum,
unsigned int *const sse) { … }
static inline int variance_final_1024_avx2(__m256i vsse, __m256i vsum,
unsigned int *const sse) { … }
static inline __m256i sum_to_32bit_avx2(const __m256i sum) { … }
static inline int variance_final_2048_avx2(__m256i vsse, __m256i vsum,
unsigned int *const sse) { … }
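// The variance_final_*_avx2() helpers reduce the vector accumulators to
// scalars: the 512/1024/2048 suffixes appear to name the largest pixel
// count each variant can reduce before the 16-bit sums would overflow, with
// sum_to_32bit_avx2() widening the sums and
// variance_final_from_32bit_sum_avx2() finishing the reduction. Each helper
// stores the sum of squared differences through *sse and returns the sum of
// differences.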
static inline void variance16_kernel_avx2(
const uint8_t *const src, const int src_stride, const uint8_t *const ref,
const int ref_stride, __m256i *const sse, __m256i *const sum) { … }
static inline void variance32_kernel_avx2(const uint8_t *const src,
const uint8_t *const ref,
__m256i *const sse,
__m256i *const sum) { … }
static inline void variance16_avx2(const uint8_t *src, const int src_stride,
const uint8_t *ref, const int ref_stride,
const int h, __m256i *const vsse,
__m256i *const vsum) { … }
static inline void variance32_avx2(const uint8_t *src, const int src_stride,
const uint8_t *ref, const int ref_stride,
const int h, __m256i *const vsse,
__m256i *const vsum) { … }
static inline void variance64_avx2(const uint8_t *src, const int src_stride,
const uint8_t *ref, const int ref_stride,
const int h, __m256i *const vsse,
__m256i *const vsum) { … }
static inline void variance128_avx2(const uint8_t *src, const int src_stride,
const uint8_t *ref, const int ref_stride,
const int h, __m256i *const vsse,
__m256i *const vsum) { … }
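// variance16/32/64/128_avx2() walk h rows of the block, feeding 32 pixels
// at a time to the kernels above and accumulating into *vsse / *vsum (the
// 16-wide version consumes two rows per kernel call, as its kernel takes
// both a src and a ref stride).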
#define AOM_VAR_NO_LOOP_AVX2(bw, bh, bits, max_pixel) …
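// AOM_VAR_NO_LOOP_AVX2(bw, bh, bits, max_pixel) presumably expands to the
// full aom_variance##bw##x##bh##_avx2() function: accumulate with
// variance##bw##_avx2(), reduce with variance_final_##max_pixel##_avx2()
// (the smallest of the 512/1024/2048 helpers that covers bw * bh pixels),
// then apply the usual identity, where bits == log2(bw * bh):
//   return *sse - (unsigned int)(((int64_t)sum * sum) >> bits);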
AOM_VAR_NO_LOOP_AVX2(…)
AOM_VAR_NO_LOOP_AVX2(…)
AOM_VAR_NO_LOOP_AVX2(…)
AOM_VAR_NO_LOOP_AVX2(…)
AOM_VAR_NO_LOOP_AVX2(…)
AOM_VAR_NO_LOOP_AVX2(…)
AOM_VAR_NO_LOOP_AVX2(…)
#if !CONFIG_REALTIME_ONLY
AOM_VAR_NO_LOOP_AVX2(64, 16, 10, 1024)
AOM_VAR_NO_LOOP_AVX2(32, 8, 8, 512)
AOM_VAR_NO_LOOP_AVX2(16, 64, 10, 1024)
AOM_VAR_NO_LOOP_AVX2(16, 4, 6, 512)
#endif
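// AOM_VAR_LOOP_AVX2(bw, bh, bits, uh) appears to cover the blocks whose
// 16-bit difference sums could overflow in a single pass: it processes the
// block in stripes of uh rows, widening each stripe's sums via
// sum_to_32bit_avx2() and finishing with
// variance_final_from_32bit_sum_avx2().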
#define AOM_VAR_LOOP_AVX2(bw, bh, bits, uh) …
AOM_VAR_LOOP_AVX2(…)
AOM_VAR_LOOP_AVX2(…)
AOM_VAR_LOOP_AVX2(…)
AOM_VAR_LOOP_AVX2(…)
unsigned int aom_mse16x16_avx2(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
unsigned int *sse) { … }
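// For MSE the sum of differences is not needed; aom_mse16x16_avx2()
// presumably reuses variance16_avx2() and returns *sse directly.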
static inline __m256i mm256_loadu2(const uint8_t *p0, const uint8_t *p1) { … }
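// mm256_loadu2() packs two unaligned 16-byte rows into one 256-bit
// register, following the same convention as mm256_loadu2_16() below: p1
// lands in the low lane and p0 in the high lane.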
#if CONFIG_AV1_HIGHBITDEPTH
static inline __m256i mm256_loadu2_16(const uint16_t *p0, const uint16_t *p1) {
const __m256i d =
_mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)p1));
return _mm256_insertf128_si256(d, _mm_loadu_si128((const __m128i *)p0), 1);
}
#endif
static inline void comp_mask_pred_line_avx2(const __m256i s0, const __m256i s1,
const __m256i a,
uint8_t *comp_pred) { … }
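// comp_mask_pred_line_avx2() blends 32 pixels of s0 and s1 with 6-bit mask
// weights, i.e. comp = (s0 * m + s1 * (64 - m) + 32) >> 6 per pixel (the
// AOM_BLEND_A64 rounding; the high-bitdepth variant below spells out the
// same arithmetic).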
void aom_comp_avg_pred_avx2(uint8_t *comp_pred, const uint8_t *pred, int width,
int height, const uint8_t *ref, int ref_stride) { … }
void aom_comp_mask_pred_avx2(uint8_t *comp_pred, const uint8_t *pred, int width,
int height, const uint8_t *ref, int ref_stride,
const uint8_t *mask, int mask_stride,
int invert_mask) { … }
#if CONFIG_AV1_HIGHBITDEPTH
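// Blends 16-bit pixels from s0 and s1: interleaving (s0, s1) with
// (m, 64 - m) lets a single madd produce s0 * m + s1 * (64 - m) per pixel,
// which is then rounded with +32 and shifted right by
// AOM_BLEND_A64_ROUND_BITS.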
static inline __m256i highbd_comp_mask_pred_line_avx2(const __m256i s0,
const __m256i s1,
const __m256i a) {
const __m256i alpha_max = _mm256_set1_epi16((1 << AOM_BLEND_A64_ROUND_BITS));
const __m256i round_const =
_mm256_set1_epi32((1 << AOM_BLEND_A64_ROUND_BITS) >> 1);
const __m256i a_inv = _mm256_sub_epi16(alpha_max, a);
const __m256i s_lo = _mm256_unpacklo_epi16(s0, s1);
const __m256i a_lo = _mm256_unpacklo_epi16(a, a_inv);
const __m256i pred_lo = _mm256_madd_epi16(s_lo, a_lo);
const __m256i pred_l = _mm256_srai_epi32(
_mm256_add_epi32(pred_lo, round_const), AOM_BLEND_A64_ROUND_BITS);
const __m256i s_hi = _mm256_unpackhi_epi16(s0, s1);
const __m256i a_hi = _mm256_unpackhi_epi16(a, a_inv);
const __m256i pred_hi = _mm256_madd_epi16(s_hi, a_hi);
const __m256i pred_h = _mm256_srai_epi32(
_mm256_add_epi32(pred_hi, round_const), AOM_BLEND_A64_ROUND_BITS);
const __m256i comp = _mm256_packs_epi32(pred_l, pred_h);
return comp;
}
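// High-bitdepth samples travel as uint8_t * and are unwrapped with
// CONVERT_TO_SHORTPTR. pred is a contiguous buffer with stride == width, so
// invert_mask simply swaps which of pred/ref is weighted by m and which by
// 64 - m.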
void aom_highbd_comp_mask_pred_avx2(uint8_t *comp_pred8, const uint8_t *pred8,
int width, int height, const uint8_t *ref8,
int ref_stride, const uint8_t *mask,
int mask_stride, int invert_mask) {
int i = 0;
uint16_t *pred = CONVERT_TO_SHORTPTR(pred8);
uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
uint16_t *comp_pred = CONVERT_TO_SHORTPTR(comp_pred8);
const uint16_t *src0 = invert_mask ? pred : ref;
const uint16_t *src1 = invert_mask ? ref : pred;
const int stride0 = invert_mask ? width : ref_stride;
const int stride1 = invert_mask ? ref_stride : width;
const __m256i zero = _mm256_setzero_si256();
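  // Three layouts: width 8 packs two rows per 256-bit register, width 16
  // handles one full row per iteration, and wider blocks step 32 pixels at
  // a time within each row.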
if (width == 8) {
do {
const __m256i s0 = mm256_loadu2_16(src0 + stride0, src0);
const __m256i s1 = mm256_loadu2_16(src1 + stride1, src1);
      // Load the two 8-wide mask rows; the second row sits mask_stride
      // bytes further on, matching the mask += (mask_stride << 1) advance.
      const __m128i m_l = _mm_loadl_epi64((const __m128i *)mask);
      const __m128i m_h =
          _mm_loadl_epi64((const __m128i *)(mask + mask_stride));
__m256i m = _mm256_castsi128_si256(m_l);
m = _mm256_insertf128_si256(m, m_h, 1);
const __m256i m_16 = _mm256_unpacklo_epi8(m, zero);
const __m256i comp = highbd_comp_mask_pred_line_avx2(s0, s1, m_16);
_mm_storeu_si128((__m128i *)(comp_pred), _mm256_castsi256_si128(comp));
_mm_storeu_si128((__m128i *)(comp_pred + width),
_mm256_extractf128_si256(comp, 1));
src0 += (stride0 << 1);
src1 += (stride1 << 1);
mask += (mask_stride << 1);
comp_pred += (width << 1);
i += 2;
} while (i < height);
} else if (width == 16) {
do {
const __m256i s0 = _mm256_loadu_si256((const __m256i *)(src0));
const __m256i s1 = _mm256_loadu_si256((const __m256i *)(src1));
const __m256i m_16 =
_mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i *)mask));
const __m256i comp = highbd_comp_mask_pred_line_avx2(s0, s1, m_16);
_mm256_storeu_si256((__m256i *)comp_pred, comp);
src0 += stride0;
src1 += stride1;
mask += mask_stride;
comp_pred += width;
i += 1;
} while (i < height);
} else {
do {
for (int x = 0; x < width; x += 32) {
const __m256i s0 = _mm256_loadu_si256((const __m256i *)(src0 + x));
const __m256i s2 = _mm256_loadu_si256((const __m256i *)(src0 + x + 16));
const __m256i s1 = _mm256_loadu_si256((const __m256i *)(src1 + x));
const __m256i s3 = _mm256_loadu_si256((const __m256i *)(src1 + x + 16));
const __m256i m01_16 =
_mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i *)(mask + x)));
const __m256i m23_16 = _mm256_cvtepu8_epi16(
_mm_loadu_si128((const __m128i *)(mask + x + 16)));
const __m256i comp = highbd_comp_mask_pred_line_avx2(s0, s1, m01_16);
const __m256i comp1 = highbd_comp_mask_pred_line_avx2(s2, s3, m23_16);
_mm256_storeu_si256((__m256i *)comp_pred, comp);
_mm256_storeu_si256((__m256i *)(comp_pred + 16), comp1);
comp_pred += 32;
}
src0 += stride0;
src1 += stride1;
mask += mask_stride;
i += 1;
} while (i < height);
}
}
#endif
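// The mse_*_16bit_avx2() helpers below compute the sum of squared
// differences between 8-bit dst pixels and 16-bit src pixels; the
// quad/dual variants appear to batch four 4-wide or two 8-wide blocks laid
// out src_blk_stride apart, which aom_mse_16xh_16bit_avx2() uses to split a
// 16-wide region.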
static uint64_t mse_4xh_16bit_avx2(uint8_t *dst, int dstride, uint16_t *src,
int sstride, int h) { … }
static uint64_t mse_4xh_quad_16bit_avx2(uint8_t *dst, int dstride,
uint16_t *src, int src_blk_stride,
int h) { … }
static uint64_t mse_8xh_16bit_avx2(uint8_t *dst, int dstride, uint16_t *src,
int sstride, int h) { … }
static uint64_t mse_8xh_dual_16bit_avx2(uint8_t *dst, int dstride,
uint16_t *src, int src_blk_stride,
int h) { … }
uint64_t aom_mse_wxh_16bit_avx2(uint8_t *dst, int dstride, uint16_t *src,
int sstride, int w, int h) { … }
uint64_t aom_mse_16xh_16bit_avx2(uint8_t *dst, int dstride, uint16_t *src,
int w, int h) { … }
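// The helpers below compute per-sub-block SSE, sum, and variance for four
// 8x8 blocks (quad) or two 16x16 blocks (dual) in a single pass over a
// 32-wide region, while also accumulating running totals in *tot_sse /
// *tot_sum. The set_one_minusone operand suggests the same maddubs
// (+1, -1) differencing trick sketched above.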
static inline void calc_sum_sse_wd32_avx2(const uint8_t *src,
const uint8_t *ref,
__m256i set_one_minusone,
__m256i sse_8x16[2],
__m256i sum_8x16[2]) { … }
static inline __m256i calc_sum_sse_order(__m256i *sse_hx16, __m256i *sum_hx16,
unsigned int *tot_sse, int *tot_sum) { … }
static inline void get_var_sse_sum_8x8_quad_avx2(
const uint8_t *src, int src_stride, const uint8_t *ref,
const int ref_stride, const int h, uint32_t *sse8x8, int *sum8x8,
unsigned int *tot_sse, int *tot_sum, uint32_t *var8x8) { … }
static inline void get_var_sse_sum_16x16_dual_avx2(
const uint8_t *src, int src_stride, const uint8_t *ref,
const int ref_stride, const int h, uint32_t *sse16x16,
unsigned int *tot_sse, int *tot_sum, uint32_t *var16x16) { … }
void aom_get_var_sse_sum_8x8_quad_avx2(const uint8_t *src_ptr,
int source_stride,
const uint8_t *ref_ptr, int ref_stride,
uint32_t *sse8x8, int *sum8x8,
unsigned int *tot_sse, int *tot_sum,
uint32_t *var8x8) { … }
void aom_get_var_sse_sum_16x16_dual_avx2(const uint8_t *src_ptr,
int source_stride,
const uint8_t *ref_ptr, int ref_stride,
uint32_t *sse16x16,
unsigned int *tot_sse, int *tot_sum,
uint32_t *var16x16) { … }