/*
 * Copyright (c) 2017, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */
#include <assert.h>
#include <immintrin.h>

#include "config/av1_rtcd.h"

#include "av1/common/cfl.h"

#include "av1/common/x86/cfl_simd.h"

#define CFL_GET_SUBSAMPLE_FUNCTION_AVX2(sub, bd)                               \
  CFL_SUBSAMPLE(avx2, sub, bd, 32, 32)                                         \
  CFL_SUBSAMPLE(avx2, sub, bd, 32, 16)                                         \
  CFL_SUBSAMPLE(avx2, sub, bd, 32, 8)                                          \
  cfl_subsample_##bd##_fn cfl_get_luma_subsampling_##sub##_##bd##_avx2(        \
      TX_SIZE tx_size) {                                                       \
    static const cfl_subsample_##bd##_fn subfn_##sub[TX_SIZES_ALL] = {         \
      cfl_subsample_##bd##_##sub##_4x4_ssse3,   /* 4x4 */                      \
      cfl_subsample_##bd##_##sub##_8x8_ssse3,   /* 8x8 */                      \
      cfl_subsample_##bd##_##sub##_16x16_ssse3, /* 16x16 */                    \
      cfl_subsample_##bd##_##sub##_32x32_avx2,  /* 32x32 */                    \
      NULL,                                     /* 64x64 (invalid CFL size) */ \
      cfl_subsample_##bd##_##sub##_4x8_ssse3,   /* 4x8 */                      \
      cfl_subsample_##bd##_##sub##_8x4_ssse3,   /* 8x4 */                      \
      cfl_subsample_##bd##_##sub##_8x16_ssse3,  /* 8x16 */                     \
      cfl_subsample_##bd##_##sub##_16x8_ssse3,  /* 16x8 */                     \
      cfl_subsample_##bd##_##sub##_16x32_ssse3, /* 16x32 */                    \
      cfl_subsample_##bd##_##sub##_32x16_avx2,  /* 32x16 */                    \
      NULL,                                     /* 32x64 (invalid CFL size) */ \
      NULL,                                     /* 64x32 (invalid CFL size) */ \
      cfl_subsample_##bd##_##sub##_4x16_ssse3,  /* 4x16 */                     \
      cfl_subsample_##bd##_##sub##_16x4_ssse3,  /* 16x4 */                     \
      cfl_subsample_##bd##_##sub##_8x32_ssse3,  /* 8x32 */                     \
      cfl_subsample_##bd##_##sub##_32x8_avx2,   /* 32x8 */                     \
      NULL,                                     /* 16x64 (invalid CFL size) */ \
      NULL,                                     /* 64x16 (invalid CFL size) */ \
    };                                                                         \
    return subfn_##sub[tx_size];                                               \
  }

/**
 * Adds 4 pixels (in a 2x2 grid) and multiplies them by 2, resulting in a more
 * precise version of a box filter for 4:2:0 pixel subsampling in Q3.
 *
 * The CfL prediction buffer is always of size CFL_BUF_SQUARE. However, the
 * active area is specified using width and height.
 *
 * Note: We don't need to worry about going over the active area, as long as we
 * stay inside the CfL prediction buffer.
 *
 * Note: For 4:2:0 luma subsampling, the output width will never be greater
 * than 16.
 */
static void cfl_luma_subsampling_420_lbd_avx2(const uint8_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  (void)width;                               // Forever 32
  const __m256i twos = _mm256_set1_epi8(2);  // Thirty two twos
  const int luma_stride = input_stride << 1;
  __m256i *row = (__m256i *)pred_buf_q3;
  const __m256i *row_end = row + (height >> 1) * CFL_BUF_LINE_I256;
  do {
    __m256i top = _mm256_loadu_si256((__m256i *)input);
    __m256i bot = _mm256_loadu_si256((__m256i *)(input + input_stride));

    // _mm256_maddubs_epi16 multiplies each unsigned byte by 2 and adds
    // horizontal pairs, computing (a + b) << 1 in a single instruction.
    __m256i top_16x16 = _mm256_maddubs_epi16(top, twos);
    __m256i bot_16x16 = _mm256_maddubs_epi16(bot, twos);
    __m256i sum_16x16 = _mm256_add_epi16(top_16x16, bot_16x16);

    _mm256_storeu_si256(row, sum_16x16);

    input += luma_stride;
  } while ((row += CFL_BUF_LINE_I256) < row_end);
}

CFL_GET_SUBSAMPLE_FUNCTION_AVX2(420, lbd)

/**
 * Adds 2 pixels (in a 2x1 grid) and multiplies them by 4, resulting in a more
 * precise version of a box filter for 4:2:2 pixel subsampling in Q3.
 *
 * The CfL prediction buffer is always of size CFL_BUF_SQUARE. However, the
 * active area is specified using width and height.
 *
 * Note: We don't need to worry about going over the active area, as long as we
 * stay inside the CfL prediction buffer.
 */
static void cfl_luma_subsampling_422_lbd_avx2(const uint8_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  (void)width;                                // Forever 32
  const __m256i fours = _mm256_set1_epi8(4);  // Thirty two fours
  __m256i *row = (__m256i *)pred_buf_q3;
  const __m256i *row_end = row + height * CFL_BUF_LINE_I256;
  do {
    __m256i top = _mm256_loadu_si256((__m256i *)input);
    // (a + b) << 2 for each horizontal pair of unsigned bytes.
    __m256i top_16x16 = _mm256_maddubs_epi16(top, fours);
    _mm256_storeu_si256(row, top_16x16);
    input += input_stride;
  } while ((row += CFL_BUF_LINE_I256) < row_end);
}

CFL_GET_SUBSAMPLE_FUNCTION_AVX2(422, lbd)

/**
 * Multiplies the pixels by 8 (scaling in Q3). AVX2 subsampling is only
 * performed on blocks of width 32.
 *
 * The CfL prediction buffer is always of size CFL_BUF_SQUARE. However, the
 * active area is specified using width and height.
 *
 * Note: We don't need to worry about going over the active area, as long as we
 * stay inside the CfL prediction buffer.
 */
static void cfl_luma_subsampling_444_lbd_avx2(const uint8_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  (void)width;  // Forever 32
  __m256i *row = (__m256i *)pred_buf_q3;
  const __m256i *row_end = row + height * CFL_BUF_LINE_I256;
  const __m256i zeros = _mm256_setzero_si256();
  do {
    __m256i top = _mm256_loadu_si256((__m256i *)input);
    // The unpack instructions operate within 128-bit lanes, so permute the
    // 64-bit quadrants first to keep the widened pixels in raster order.
    top = _mm256_permute4x64_epi64(top, _MM_SHUFFLE(3, 1, 2, 0));

    __m256i row_lo = _mm256_unpacklo_epi8(top, zeros);
    row_lo = _mm256_slli_epi16(row_lo, 3);
    __m256i row_hi = _mm256_unpackhi_epi8(top, zeros);
    row_hi = _mm256_slli_epi16(row_hi, 3);

    _mm256_storeu_si256(row, row_lo);
    _mm256_storeu_si256(row + 1, row_hi);

    input += input_stride;
  } while ((row += CFL_BUF_LINE_I256) < row_end);
}

CFL_GET_SUBSAMPLE_FUNCTION_AVX2(444, lbd)

#if CONFIG_AV1_HIGHBITDEPTH
/**
 * Adds 4 pixels (in a 2x2 grid) and multiplies them by 2, resulting in a more
 * precise version of a box filter for 4:2:0 pixel subsampling in Q3.
 *
 * The CfL prediction buffer is always of size CFL_BUF_SQUARE. However, the
 * active area is specified using width and height.
 *
 * Note: We don't need to worry about going over the active area, as long as we
 * stay inside the CfL prediction buffer.
 *
 * Note: For 4:2:0 luma subsampling, the output width will never be greater
 * than 16.
 */
static void cfl_luma_subsampling_420_hbd_avx2(const uint16_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  (void)width;  // Forever 32
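  // 4:2:0 also halves the height: two luma rows are consumed per output row,
  // so the input pointer advances by two source rows per iteration.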
  const int luma_stride = input_stride << 1;
  __m256i *row = (__m256i *)pred_buf_q3;
  const __m256i *row_end = row + (height >> 1) * CFL_BUF_LINE_I256;
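  // The prediction buffer is walked in units of CFL_BUF_LINE_I256, the number
  // of 256-bit registers spanning one buffer line.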
  do {
    __m256i top = _mm256_loadu_si256((__m256i *)input);
    __m256i bot = _mm256_loadu_si256((__m256i *)(input + input_stride));
    __m256i sum = _mm256_add_epi16(top, bot);

    __m256i top_1 = _mm256_loadu_si256((__m256i *)(input + 16));
    __m256i bot_1 = _mm256_loadu_si256((__m256i *)(input + 16 + input_stride));
    __m256i sum_1 = _mm256_add_epi16(top_1, bot_1);

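    // _mm256_hadd_epi16 adds adjacent pairs within each 128-bit lane, which
    // interleaves results from the two lanes; the permute restores raster
    // order, and the final add doubles the 4-pixel sums into Q3.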
    __m256i hsum = _mm256_hadd_epi16(sum, sum_1);
    hsum = _mm256_permute4x64_epi64(hsum, _MM_SHUFFLE(3, 1, 2, 0));
    hsum = _mm256_add_epi16(hsum, hsum);

    _mm256_storeu_si256(row, hsum);

    input += luma_stride;
  } while ((row += CFL_BUF_LINE_I256) < row_end);
}

CFL_GET_SUBSAMPLE_FUNCTION_AVX2(420, hbd)

/**
 * Adds 2 pixels (in a 2x1 grid) and multiplies them by 4, resulting in a more
 * precise version of a box filter for 4:2:2 pixel subsampling in Q3.
 *
 * The CfL prediction buffer is always of size CFL_BUF_SQUARE. However, the
 * active area is specified using width and height.
 *
 * Note: We don't need to worry about going over the active area, as long as we
 * stay inside the CfL prediction buffer.
 */
static void cfl_luma_subsampling_422_hbd_avx2(const uint16_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  (void)width;  // Forever 32
  __m256i *row = (__m256i *)pred_buf_q3;
  const __m256i *row_end = row + height * CFL_BUF_LINE_I256;
  do {
    __m256i top = _mm256_loadu_si256((__m256i *)input);
    __m256i top_1 = _mm256_loadu_si256((__m256i *)(input + 16));
    __m256i hsum = _mm256_hadd_epi16(top, top_1);
    hsum = _mm256_permute4x64_epi64(hsum, _MM_SHUFFLE(3, 1, 2, 0));
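    // Each 16-bit value now holds a 2-pixel sum; << 2 scales it to Q3
    // (a 2-pixel sum times 4 equals the 2-pixel average times 8).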
    hsum = _mm256_slli_epi16(hsum, 2);

    _mm256_storeu_si256(row, hsum);

    input += input_stride;
  } while ((row += CFL_BUF_LINE_I256) < row_end);
}

CFL_GET_SUBSAMPLE_FUNCTION_AVX2(422, hbd)

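/**
 * Multiplies the pixels by 8 (scaling in Q3). No subsampling is performed for
 * 4:4:4; each 32-wide row is copied, scaled, into the prediction buffer.
 *
 * The CfL prediction buffer is always of size CFL_BUF_SQUARE. However, the
 * active area is specified using width and height.
 */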
static void cfl_luma_subsampling_444_hbd_avx2(const uint16_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  (void)width;  // Forever 32
  __m256i *row = (__m256i *)pred_buf_q3;
  const __m256i *row_end = row + height * CFL_BUF_LINE_I256;
  do {
    __m256i top = _mm256_loadu_si256((__m256i *)input);
    __m256i top_1 = _mm256_loadu_si256((__m256i *)(input + 16));
    _mm256_storeu_si256(row, _mm256_slli_epi16(top, 3));
    _mm256_storeu_si256(row + 1, _mm256_slli_epi16(top_1, 3));
    input += input_stride;
  } while ((row += CFL_BUF_LINE_I256) < row_end);
}

CFL_GET_SUBSAMPLE_FUNCTION_AVX2(444, hbd)
#endif  // CONFIG_AV1_HIGHBITDEPTH

static inline __m256i predict_unclipped(const __m256i *input, __m256i alpha_q12,
                                        __m256i alpha_sign, __m256i dc_q0) {
  __m256i ac_q3 = _mm256_loadu_si256(input);
  __m256i ac_sign = _mm256_sign_epi16(alpha_sign, ac_q3);
  __m256i scaled_luma_q0 =
      _mm256_mulhrs_epi16(_mm256_abs_epi16(ac_q3), alpha_q12);
  scaled_luma_q0 = _mm256_sign_epi16(scaled_luma_q0, ac_sign);
  return _mm256_add_epi16(scaled_luma_q0, dc_q0);
}

static inline void cfl_predict_lbd_avx2(const int16_t *pred_buf_q3,
                                        uint8_t *dst, int dst_stride,
                                        int alpha_q3, int width, int height) {
  (void)width;  // Forever 32
  const __m256i alpha_sign = _mm256_set1_epi16(alpha_q3);
  const __m256i alpha_q12 = _mm256_slli_epi16(_mm256_abs_epi16(alpha_sign), 9);
  const __m256i dc_q0 = _mm256_set1_epi16(*dst);
  __m256i *row = (__m256i *)pred_buf_q3;
  const __m256i *row_end = row + height * CFL_BUF_LINE_I256;

  do {
    __m256i res = predict_unclipped(row, alpha_q12, alpha_sign, dc_q0);
    __m256i next = predict_unclipped(row + 1, alpha_q12, alpha_sign, dc_q0);
    // packus saturates to uint8_t, so no explicit clamp is needed, but it
    // interleaves the 128-bit lanes; the permute restores raster order.
    res = _mm256_packus_epi16(res, next);
    res = _mm256_permute4x64_epi64(res, _MM_SHUFFLE(3, 1, 2, 0));
    _mm256_storeu_si256((__m256i *)dst, res);
    dst += dst_stride;
  } while ((row += CFL_BUF_LINE_I256) < row_end);
}

CFL_PREDICT_X(avx2, 32, 8, lbd)
CFL_PREDICT_X(avx2, 32, 16, lbd)
CFL_PREDICT_X(avx2, 32, 32, lbd)

cfl_predict_lbd_fn cfl_get_predict_lbd_fn_avx2(TX_SIZE tx_size) {
  static const cfl_predict_lbd_fn pred[TX_SIZES_ALL] = {
    cfl_predict_lbd_4x4_ssse3,   /* 4x4 */
    cfl_predict_lbd_8x8_ssse3,   /* 8x8 */
    cfl_predict_lbd_16x16_ssse3, /* 16x16 */
    cfl_predict_lbd_32x32_avx2,  /* 32x32 */
    NULL,                        /* 64x64 (invalid CFL size) */
    cfl_predict_lbd_4x8_ssse3,   /* 4x8 */
    cfl_predict_lbd_8x4_ssse3,   /* 8x4 */
    cfl_predict_lbd_8x16_ssse3,  /* 8x16 */
    cfl_predict_lbd_16x8_ssse3,  /* 16x8 */
    cfl_predict_lbd_16x32_ssse3, /* 16x32 */
    cfl_predict_lbd_32x16_avx2,  /* 32x16 */
    NULL,                        /* 32x64 (invalid CFL size) */
    NULL,                        /* 64x32 (invalid CFL size) */
    cfl_predict_lbd_4x16_ssse3,  /* 4x16  */
    cfl_predict_lbd_16x4_ssse3,  /* 16x4  */
    cfl_predict_lbd_8x32_ssse3,  /* 8x32  */
    cfl_predict_lbd_32x8_avx2,   /* 32x8  */
    NULL,                        /* 16x64 (invalid CFL size) */
    NULL,                        /* 64x16 (invalid CFL size) */
  };
  // Modulo TX_SIZES_ALL to ensure that an attacker won't be able to index the
  // function pointer array out of bounds.
  return pred[tx_size % TX_SIZES_ALL];
}

#if CONFIG_AV1_HIGHBITDEPTH
static __m256i highbd_max_epi16(int bd) {
  const __m256i neg_one = _mm256_set1_epi16(-1);
  // (1 << bd) - 1 => -(-1 << bd) -1 => -1 - (-1 << bd) => -1 ^ (-1 << bd)
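  // e.g. bd = 10: (-1 << 10) = 0xFC00, and 0xFC00 ^ 0xFFFF = 0x03FF = 1023.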
  return _mm256_xor_si256(_mm256_slli_epi16(neg_one, bd), neg_one);
}

static __m256i highbd_clamp_epi16(__m256i u, __m256i zero, __m256i max) {
  return _mm256_max_epi16(_mm256_min_epi16(u, max), zero);
}

static inline void cfl_predict_hbd_avx2(const int16_t *pred_buf_q3,
                                        uint16_t *dst, int dst_stride,
                                        int alpha_q3, int bd, int width,
                                        int height) {
  // Use SSSE3 version for smaller widths
  assert(width == 16 || width == 32);
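  // alpha_q3 is in Q3; |alpha| << 9 re-expresses it in Q12 so that
  // _mm256_mulhrs_epi16 in predict_unclipped(), which computes
  // (a * b + (1 << 14)) >> 15, yields a Q0 result against the Q3 AC values.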
  const __m256i alpha_sign = _mm256_set1_epi16(alpha_q3);
  const __m256i alpha_q12 = _mm256_slli_epi16(_mm256_abs_epi16(alpha_sign), 9);
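  // dst already holds the DC prediction, so the DC value (Q0) can be loaded
  // directly from the first destination row.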
  const __m256i dc_q0 = _mm256_loadu_si256((__m256i *)dst);
  const __m256i max = highbd_max_epi16(bd);

  __m256i *row = (__m256i *)pred_buf_q3;
  const __m256i *row_end = row + height * CFL_BUF_LINE_I256;
  do {
    const __m256i res = predict_unclipped(row, alpha_q12, alpha_sign, dc_q0);
    _mm256_storeu_si256((__m256i *)dst,
                        highbd_clamp_epi16(res, _mm256_setzero_si256(), max));
    if (width == 32) {
      const __m256i res_1 =
          predict_unclipped(row + 1, alpha_q12, alpha_sign, dc_q0);
      _mm256_storeu_si256(
          (__m256i *)(dst + 16),
          highbd_clamp_epi16(res_1, _mm256_setzero_si256(), max));
    }
    dst += dst_stride;
  } while ((row += CFL_BUF_LINE_I256) < row_end);
}

CFL_PREDICT_X(avx2, 16, 4, hbd)
CFL_PREDICT_X(avx2, 16, 8, hbd)
CFL_PREDICT_X(avx2, 16, 16, hbd)
CFL_PREDICT_X(avx2, 16, 32, hbd)
CFL_PREDICT_X(avx2, 32, 8, hbd)
CFL_PREDICT_X(avx2, 32, 16, hbd)
CFL_PREDICT_X(avx2, 32, 32, hbd)

cfl_predict_hbd_fn cfl_get_predict_hbd_fn_avx2(TX_SIZE tx_size) {
  static const cfl_predict_hbd_fn pred[TX_SIZES_ALL] = {
    cfl_predict_hbd_4x4_ssse3,  /* 4x4 */
    cfl_predict_hbd_8x8_ssse3,  /* 8x8 */
    cfl_predict_hbd_16x16_avx2, /* 16x16 */
    cfl_predict_hbd_32x32_avx2, /* 32x32 */
    NULL,                       /* 64x64 (invalid CFL size) */
    cfl_predict_hbd_4x8_ssse3,  /* 4x8 */
    cfl_predict_hbd_8x4_ssse3,  /* 8x4 */
    cfl_predict_hbd_8x16_ssse3, /* 8x16 */
    cfl_predict_hbd_16x8_avx2,  /* 16x8 */
    cfl_predict_hbd_16x32_avx2, /* 16x32 */
    cfl_predict_hbd_32x16_avx2, /* 32x16 */
    NULL,                       /* 32x64 (invalid CFL size) */
    NULL,                       /* 64x32 (invalid CFL size) */
    cfl_predict_hbd_4x16_ssse3, /* 4x16  */
    cfl_predict_hbd_16x4_avx2,  /* 16x4  */
    cfl_predict_hbd_8x32_ssse3, /* 8x32  */
    cfl_predict_hbd_32x8_avx2,  /* 32x8  */
    NULL,                       /* 16x64 (invalid CFL size) */
    NULL,                       /* 64x16 (invalid CFL size) */
  };
  // Modulo TX_SIZES_ALL to ensure that an attacker won't be able to index the
  // function pointer array out of bounds.
  return pred[tx_size % TX_SIZES_ALL];
}
#endif  // CONFIG_AV1_HIGHBITDEPTH

// Returns a vector where all the (32-bit) elements are the sum of all the
// lanes in a.
static inline __m256i fill_sum_epi32(__m256i a) {
  // Add the lower 128 bits to the upper 128 bits.
  a = _mm256_add_epi32(a, _mm256_permute2x128_si256(a, a, 1));
  // Swap adjacent 32-bit elements and accumulate.
  a = _mm256_add_epi32(a, _mm256_shuffle_epi32(a, _MM_SHUFFLE(2, 3, 0, 1)));
  // Swap 64-bit halves and accumulate; every element now holds the total.
  return _mm256_add_epi32(a, _mm256_shuffle_epi32(a, _MM_SHUFFLE(1, 0, 3, 2)));
}

// Zero-extends the 16-bit values in a to 32 bits and adds the low and high
// halves of each 128-bit lane, producing eight 32-bit partial sums.
static inline __m256i _mm256_addl_epi16(__m256i a) {
  return _mm256_add_epi32(_mm256_unpacklo_epi16(a, _mm256_setzero_si256()),
                          _mm256_unpackhi_epi16(a, _mm256_setzero_si256()));
}

static inline void subtract_average_avx2(const uint16_t *src_ptr,
                                         int16_t *dst_ptr, int width,
                                         int height, int round_offset,
                                         int num_pel_log2) {
  // Use SSE2 version for smaller widths
  assert(width == 16 || width == 32);

  const __m256i *src = (__m256i *)src_ptr;
  const __m256i *const end = src + height * CFL_BUF_LINE_I256;
  // To maximize usage of the AVX2 registers, we sum two rows per loop
  // iteration.
  const int step = 2 * CFL_BUF_LINE_I256;

  __m256i sum = _mm256_setzero_si256();
  // For width 32, a second accumulator shortens the dependency chain.
  __m256i sum2 = _mm256_setzero_si256();

  do {
    // Add each top row to the row below it.
    __m256i l0 = _mm256_add_epi16(_mm256_loadu_si256(src),
                                  _mm256_loadu_si256(src + CFL_BUF_LINE_I256));
    sum = _mm256_add_epi32(sum, _mm256_addl_epi16(l0));
    if (width == 32) {  // This compile-time-constant branch is optimized out.
      __m256i l1 =
          _mm256_add_epi16(_mm256_loadu_si256(src + 1),
                           _mm256_loadu_si256(src + 1 + CFL_BUF_LINE_I256));
      sum2 = _mm256_add_epi32(sum2, _mm256_addl_epi16(l1));
    }
    src += step;
  } while (src < end);
  // Combine both accumulators.
  if (width == 32) sum = _mm256_add_epi32(sum, sum2);

  __m256i fill = fill_sum_epi32(sum);

  // Round, divide by the pixel count to get the average, then broadcast it
  // as a 16-bit value.
  __m256i avg_epi16 = _mm256_srli_epi32(
      _mm256_add_epi32(fill, _mm256_set1_epi32(round_offset)), num_pel_log2);
  avg_epi16 = _mm256_packs_epi32(avg_epi16, avg_epi16);

  // Subtract the average from every pixel.
  src = (__m256i *)src_ptr;
  __m256i *dst = (__m256i *)dst_ptr;
  do {
    _mm256_storeu_si256(dst,
                        _mm256_sub_epi16(_mm256_loadu_si256(src), avg_epi16));
    if (width == 32) {
      _mm256_storeu_si256(
          dst + 1, _mm256_sub_epi16(_mm256_loadu_si256(src + 1), avg_epi16));
    }
    src += CFL_BUF_LINE_I256;
    dst += CFL_BUF_LINE_I256;
  } while (src < end);
}

// Declare wrappers for AVX2 sizes
CFL_SUB_AVG_X(avx2, 16, 4, 32, 6)
CFL_SUB_AVG_X(avx2, 16, 8, 64, 7)
CFL_SUB_AVG_X(avx2, 16, 16, 128, 8)
CFL_SUB_AVG_X(avx2, 16, 32, 256, 9)
CFL_SUB_AVG_X(avx2, 32, 8, 128, 8)
CFL_SUB_AVG_X(avx2, 32, 16, 256, 9)
CFL_SUB_AVG_X(avx2, 32, 32, 512, 10)

// Based on the observation that for small blocks AVX2 does not outperform
// SSE2, we call the SSE2 code for block widths 4 and 8.
cfl_subtract_average_fn cfl_get_subtract_average_fn_avx2(TX_SIZE tx_size) {
  static const cfl_subtract_average_fn sub_avg[TX_SIZES_ALL] = {
    cfl_subtract_average_4x4_sse2,   /* 4x4 */
    cfl_subtract_average_8x8_sse2,   /* 8x8 */
    cfl_subtract_average_16x16_avx2, /* 16x16 */
    cfl_subtract_average_32x32_avx2, /* 32x32 */
    NULL,                            /* 64x64 (invalid CFL size) */
    cfl_subtract_average_4x8_sse2,   /* 4x8 */
    cfl_subtract_average_8x4_sse2,   /* 8x4 */
    cfl_subtract_average_8x16_sse2,  /* 8x16 */
    cfl_subtract_average_16x8_avx2,  /* 16x8 */
    cfl_subtract_average_16x32_avx2, /* 16x32 */
    cfl_subtract_average_32x16_avx2, /* 32x16 */
    NULL,                            /* 32x64 (invalid CFL size) */
    NULL,                            /* 64x32 (invalid CFL size) */
    cfl_subtract_average_4x16_sse2,  /* 4x16 */
    cfl_subtract_average_16x4_avx2,  /* 16x4 */
    cfl_subtract_average_8x32_sse2,  /* 8x32 */
    cfl_subtract_average_32x8_avx2,  /* 32x8 */
    NULL,                            /* 16x64 (invalid CFL size) */
    NULL,                            /* 64x16 (invalid CFL size) */
  };
  // Modulo TX_SIZES_ALL to ensure that an attacker won't be able to index the
  // function pointer array out of bounds.
  return sub_avg[tx_size % TX_SIZES_ALL];
}