chromium/third_party/libvpx/source/libvpx/vpx_dsp/x86/inv_txfm_avx2.c

/*
 *  Copyright (c) 2023 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <immintrin.h>  // AVX2

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/txfm_common.h"

// Interleave a pair of 16-bit constants into all 16 lanes, {a, b, a, b, ...},
// for use with _mm256_madd_epi16 (the 256-bit analog of PAIR_SET_EPI16 in
// vpx_dsp/x86/txfm_common_sse2.h).
#define PAIR256_SET_EPI16(a, b)                                            \
  _mm256_set_epi16((int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
                   (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
                   (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
                   (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a))

// Load 16 rows of 16 coefficients each into in[0..15]; stride is in
// coefficients. (Sketch of the elided body; the high-bitdepth path packs
// 32-bit tran_low_t values down to 16 bits.)
static INLINE void idct_load16x16(const tran_low_t *input, __m256i *in,
                                  int stride) {
  int i;
  for (i = 0; i < 16; i++) {
#if CONFIG_VP9_HIGHBITDEPTH
    const __m256i in0 =
        _mm256_loadu_si256((const __m256i *)(input + i * stride));
    const __m256i in1 =
        _mm256_loadu_si256((const __m256i *)(input + i * stride + 8));
    // _mm256_packs_epi32 packs within each 128-bit lane; the permute
    // restores the natural element order.
    in[i] = _mm256_permute4x64_epi64(_mm256_packs_epi32(in0, in1), 0xD8);
#else
    in[i] = _mm256_loadu_si256((const __m256i *)(input + i * stride));
#endif
  }
}

// Add DCT_CONST_ROUNDING to each 32-bit lane and shift down by
// DCT_CONST_BITS, matching the scalar dct_const_round_shift().
static INLINE __m256i dct_round_shift_avx2(__m256i in) {
  const __m256i t =
      _mm256_add_epi32(in, _mm256_set1_epi32(DCT_CONST_ROUNDING));
  return _mm256_srai_epi32(t, DCT_CONST_BITS);
}

// Multiply-accumulate interleaved 16-bit pairs against the cosine constants
// and round-shift the 32-bit results.
static INLINE __m256i idct_madd_round_shift_avx2(__m256i *in, __m256i *cospi) {
  const __m256i t = _mm256_madd_epi16(*in, *cospi);
  return dct_round_shift_avx2(t);
}

// Calculate the dot product between interleaved in0/in1 pairs and x, then
// round-shift and wrap the results back to short.
static INLINE __m256i idct_calc_wraplow_avx2(__m256i *in0, __m256i *in1,
                                             __m256i *x) {
  __m256i t0 = _mm256_unpacklo_epi16(*in0, *in1);
  __m256i t1 = _mm256_unpackhi_epi16(*in0, *in1);
  t0 = idct_madd_round_shift_avx2(&t0, x);
  t1 = idct_madd_round_shift_avx2(&t1, x);
  // packs_epi32 packs per 128-bit lane, which matches the per-lane
  // unpacklo/unpackhi split above, so element order is preserved.
  return _mm256_packs_epi32(t0, t1);
}

// Multiply elements by constants and add them together: the rotation
// butterfly out0 = in0 * c0 - in1 * c1 and out1 = in0 * c1 + in1 * c0,
// each rounded by dct_round_shift_avx2().
static INLINE void butterfly16(__m256i in0, __m256i in1, int c0, int c1,
                               __m256i *out0, __m256i *out1) {
  __m256i cst0 = PAIR256_SET_EPI16(c0, -c1);
  __m256i cst1 = PAIR256_SET_EPI16(c1, c0);
  *out0 = idct_calc_wraplow_avx2(&in0, &in1, &cst0);
  *out1 = idct_calc_wraplow_avx2(&in0, &in1, &cst1);
}
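
// For reference, a scalar sketch of the per-element arithmetic butterfly16
// performs. butterfly16_scalar is illustrative only and not part of libvpx;
// the SIMD version additionally saturates when packing back to 16 bits.
static INLINE void butterfly16_scalar(int16_t in0, int16_t in1, int c0, int c1,
                                      int16_t *out0, int16_t *out1) {
  const int32_t t0 = in0 * c0 - in1 * c1;
  const int32_t t1 = in0 * c1 + in1 * c0;
  // dct_const_round_shift(): add 2^(DCT_CONST_BITS - 1), shift right by 14.
  *out0 = (int16_t)((t0 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS);
  *out1 = (int16_t)((t1 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS);
}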

// 16-point inverse DCT on the 16 columns held in in[], built from
// butterfly16() rotations; the stage structure follows the scalar
// vpx_idct16_c() (body elided here).
static INLINE void idct16_16col(__m256i *in, __m256i *out) {}

// Add a row of 16 residuals to the 16 destination pixels and store with
// unsigned saturation. (Sketch of the elided body.)
static INLINE void recon_and_store16(uint8_t *dest, __m256i in_x) {
  __m256i d = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i *)dest));
  d = _mm256_add_epi16(in_x, d);
  // packus_epi16 packs per 128-bit lane; pairing d with its own upper lane
  // leaves all 16 bytes, in order, in the low 128 bits.
  d = _mm256_packus_epi16(
      d, _mm256_castsi128_si256(_mm256_extracti128_si256(d, 1)));
  _mm_storeu_si128((__m128i *)dest, _mm256_castsi256_si128(d));
}

// Round (add 32) and shift down by 6, the final-stage scaling of the IDCT,
// then reconstruct one 16-pixel row.
static INLINE void write_buffer_16x1(uint8_t *dest, __m256i in) {
  const __m256i final_rounding = _mm256_set1_epi16(1 << 5);
  __m256i out = _mm256_adds_epi16(in, final_rounding);
  out = _mm256_srai_epi16(out, 6);
  recon_and_store16(dest, out);
}

// Round, shift, and reconstruct all 32 rows of a 16x32 block.
static INLINE void store_buffer_16x32(__m256i *in, uint8_t *dst, int stride) {
  int j;
  for (j = 0; j < 32; ++j) {
    write_buffer_16x1(dst + j * stride, in[j]);
  }
}

// Transpose two 8x8 blocks of 16-bit values at once, one per 128-bit lane
// (body elided here).
static INLINE void transpose2_8x8_avx2(__m256i *in, __m256i *out) {}

// Transpose a 16x16 block of 16-bit values, combining transpose2_8x8_avx2()
// results across the two lanes (body elided here).
static INLINE void transpose_16bit_16x16_avx2(__m256i *in, __m256i *out) {}

void vpx_idct16x16_256_add_avx2(const tran_low_t *input, uint8_t *dest,
                                int stride) {
  int i;
  __m256i in[16];

  // (Sketch of the elided body: the usual two-pass row/column IDCT.)
  idct_load16x16(input, in, 16);
  transpose_16bit_16x16_avx2(in, in);
  idct16_16col(in, in);

  transpose_16bit_16x16_avx2(in, in);
  idct16_16col(in, in);

  for (i = 0; i < 16; ++i) {
    write_buffer_16x1(dest + i * stride, in[i]);
  }
}

// Only do the addition and subtraction butterfly; size = 16 or 32. Element i
// pairs with element (size - 1 - i).
static INLINE void add_sub_butterfly_avx2(__m256i *in, __m256i *out, int size) {
  int i;
  const int num = size >> 1;
  const int bound = size - 1;
  for (i = 0; i < num; ++i) {
    out[i] = _mm256_add_epi16(in[i], in[bound - i]);
    out[bound - i] = _mm256_sub_epi16(in[i], in[bound - i]);
  }
}
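
// Worked example (illustrative, size = 4): in = {A, B, C, D} gives
// out = {A + D, B + C, B - C, A - D}.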

// For each 16x32 block __m256i in[32]:
// input at indices 0, 4, 8, 12, 16, 20, 24, 28;
// output pixels 0-7 in __m256i out[32].
static INLINE void idct32_1024_16x32_quarter_1(__m256i *in, __m256i *out) {}

static INLINE void idct32_16x32_quarter_2_stage_4_to_6(__m256i *step1,
                                                       __m256i *out) {}

// For each 16x32 block __m256i in[32]:
// input at indices 2, 6, 10, 14, 18, 22, 26, 30;
// output pixels 8-15 in __m256i out[32].
static INLINE void idct32_1024_16x32_quarter_2(__m256i *in, __m256i *out) {}

static INLINE void idct32_16x32_quarter_3_4_stage_4_to_7(__m256i *step1,
                                                         __m256i *out) {}

// Combine quarters 1 and 2 into output pixels 0-15. (Sketch of the elided
// body, mirroring the SSSE3 idct32_full_8x32_quarter_1_2().)
static INLINE void idct32_1024_16x32_quarter_1_2(__m256i *in, __m256i *out) {
  __m256i temp[16];
  idct32_1024_16x32_quarter_1(in, temp);
  idct32_1024_16x32_quarter_2(in, temp);
  // stage 7
  add_sub_butterfly_avx2(temp, out, 16);
}

// For each 16x32 block __m256i in[32]:
// input at the odd indices
// 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31;
// output pixels 16-23 and 24-31 in __m256i out[32].
static INLINE void idct32_1024_16x32_quarter_3_4(__m256i *in, __m256i *out) {}
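
// Taken together, the quarters partition the 32 inputs: quarter 1 takes the
// multiples of 4, quarter 2 the remaining even indices, and quarters 3/4 the
// odd indices; idct32_1024_16x32() below joins the two halves with a final
// add_sub_butterfly_avx2(..., 32).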

static INLINE void idct32_1024_16x32(__m256i *in, __m256i *out) {
  __m256i temp[32];
  // quarter_1_2 fills temp[0..15]; quarter_3_4 fills temp[16..31] (see the
  // index comments above).
  idct32_1024_16x32_quarter_1_2(in, temp);
  idct32_1024_16x32_quarter_3_4(in, temp);
  // final stage
  add_sub_butterfly_avx2(temp, out, 32);
}

void vpx_idct32x32_1024_add_avx2(const tran_low_t *input, uint8_t *dest,
                                 int stride) {
  __m256i l[32], r[32], out[32], *in;
  int i;

  // (Reconstruction sketch of the elided body, following the two-pass
  // 16x32 structure implied by the helpers above.)
  in = l;
  for (i = 0; i < 2; i++) {
    // Rows pass: one 16x32 slab per iteration, transposed so the 32-point
    // transform runs across registers.
    idct_load16x16(input, in, 32);
    transpose_16bit_16x16_avx2(in, in);
    idct_load16x16(input + 16, in + 16, 32);
    transpose_16bit_16x16_avx2(in + 16, in + 16);
    idct32_1024_16x32(in, in);
    in = r;
    input += 32 << 4;  // advance 16 rows of 32 coefficients
  }

  // Columns pass: transpose back and transform each 16-column half.
  for (i = 0; i < 32; i += 16) {
    transpose_16bit_16x16_avx2(l + i, out);
    transpose_16bit_16x16_avx2(r + i, out + 16);
    idct32_1024_16x32(out, out);
    store_buffer_16x32(out, dest, stride);
    dest += 16;
  }
}

// Case when only the upper-left 16x16 block has non-zero coeffs, so only the
// first 16 rows and columns of input need to be read (body elided here).
void vpx_idct32x32_135_add_avx2(const tran_low_t *input, uint8_t *dest,
                                int stride) {}
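
// For context, a sketch of how a caller typically chooses among the 32x32
// variants by end-of-block (eob) position, modeled on the dispatch in
// vp9/common/vp9_idct.c (not part of this file):
//
//   if (eob == 1)
//     vpx_idct32x32_1_add(input, dest, stride);
//   else if (eob <= 34)
//     vpx_idct32x32_34_add(input, dest, stride);   // upper-left 8x8 only
//   else if (eob <= 135)
//     vpx_idct32x32_135_add(input, dest, stride);  // upper-left 16x16 only
//   else
//     vpx_idct32x32_1024_add(input, dest, stride);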