#ifndef AOM_AV1_ENCODER_INTRA_MODE_SEARCH_UTILS_H_
#define AOM_AV1_ENCODER_INTRA_MODE_SEARCH_UTILS_H_
#include "av1/common/enums.h"
#include "av1/common/pred_common.h"
#include "av1/common/reconintra.h"
#include "av1/encoder/encoder.h"
#include "av1/encoder/encodeframe.h"
#include "av1/encoder/model_rd.h"
#include "av1/encoder/palette.h"
#include "av1/encoder/hybrid_fwd_txfm.h"
#ifdef __cplusplus
extern "C" {
#endif
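// Threshold macro consulted by the intra mode search when pruning on RD
// variance; its definition is elided in this listing.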
#define INTRA_RD_VAR_THRESH(X) …
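// Number of orientation bins in the histogram-of-gradients (HOG) feature.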
#define BINS …
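// Parameters of a small fully-connected neural network that maps the
// BINS-dimensional HOG feature vector to one score per directional intra
// mode. prune_intra_mode_with_hog() below uses these scores to skip
// unpromising directions.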
static const float av1_intra_hog_model_bias[DIRECTIONAL_MODES] = …;
static const float av1_intra_hog_model_weights[BINS * DIRECTIONAL_MODES] = …;
static const NN_CONFIG av1_intra_hog_model_nnconfig = …;
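// get_hist_bin_idx() maps a Sobel gradient (dx, dy) to one of the BINS
// orientation bins; FIX_PREC_BITS is the fixed-point precision used for the
// dy/dx ratio.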
#define FIX_PREC_BITS …
static inline int get_hist_bin_idx(int dx, int dy) { … }
#undef FIX_PREC_BITS
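// Divides every histogram bin by the accumulated gradient magnitude `total`.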
static inline void normalize_hog(float total, float *hist) { … }
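// Accumulates 3x3 Sobel gradients of an 8-bit source block into `hist` and
// normalizes the result.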
static inline void lowbd_generate_hog(const uint8_t *src, int stride, int rows,
                                      int cols, float *hist) { … }
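// Computes and caches per-pixel gradient information (whether dx is zero,
// |dx| + |dy|, and the orientation bin) for a whole superblock of 8-bit
// source pixels.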
static inline void lowbd_compute_gradient_info_sb(MACROBLOCK *const x,
                                                  BLOCK_SIZE sb_size,
                                                  PLANE_TYPE plane) { … }
#if CONFIG_AV1_HIGHBITDEPTH
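// High bit-depth counterpart of lowbd_generate_hog(); identical logic, but
// operating on 16-bit samples obtained via CONVERT_TO_SHORTPTR().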
static inline void highbd_generate_hog(const uint8_t *src8, int stride,
                                       int rows, int cols, float *hist) {
  // Start from a small positive value so normalize_hog() never divides by
  // zero.
  float total = 0.1f;
  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  src += stride;
  // Skip the one-pixel border, where the 3x3 Sobel window would read out of
  // bounds.
  for (int r = 1; r < rows - 1; ++r) {
    for (int c = 1; c < cols - 1; ++c) {
      const uint16_t *above = &src[c - stride];
      const uint16_t *below = &src[c + stride];
      const uint16_t *left = &src[c - 1];
      const uint16_t *right = &src[c + 1];
      // Horizontal and vertical 3x3 Sobel filters.
      const int dx = (right[-stride] + 2 * right[0] + right[stride]) -
                     (left[-stride] + 2 * left[0] + left[stride]);
      const int dy = (below[-1] + 2 * below[0] + below[1]) -
                     (above[-1] + 2 * above[0] + above[1]);
      // |dx| + |dy| approximates the gradient magnitude; flat pixels
      // contribute nothing. (This check also covers dx == 0 && dy == 0, so a
      // separate test for that case is unnecessary.)
      const int temp = abs(dx) + abs(dy);
      if (!temp) continue;
      total += temp;
      if (dx == 0) {
        // dx == 0 falls on the wrap-around point of the orientation range:
        // split the weight between the first and last bins.
        hist[0] += temp / 2;
        hist[BINS - 1] += temp / 2;
      } else {
        const int idx = get_hist_bin_idx(dx, dy);
        assert(idx >= 0 && idx < BINS);
        hist[idx] += temp;
      }
    }
    src += stride;
  }
  normalize_hog(total, hist);
}
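// High bit-depth counterpart of lowbd_compute_gradient_info_sb().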
static inline void highbd_compute_gradient_info_sb(MACROBLOCK *const x,
                                                   BLOCK_SIZE sb_size,
                                                   PLANE_TYPE plane) {
  PixelLevelGradientInfo *const grad_info_sb =
      x->pixel_gradient_info + plane * MAX_SB_SQUARE;
  const uint16_t *src = CONVERT_TO_SHORTPTR(x->plane[plane].src.buf);
  const int stride = x->plane[plane].src.stride;
  const int ss_x = x->e_mbd.plane[plane].subsampling_x;
  const int ss_y = x->e_mbd.plane[plane].subsampling_y;
  const int sb_height = block_size_high[sb_size] >> ss_y;
  const int sb_width = block_size_wide[sb_size] >> ss_x;
  src += stride;
  // Same 3x3 Sobel filters and border handling as highbd_generate_hog(); the
  // results are cached per pixel instead of being accumulated directly.
  for (int r = 1; r < sb_height - 1; ++r) {
    for (int c = 1; c < sb_width - 1; ++c) {
      const uint16_t *above = &src[c - stride];
      const uint16_t *below = &src[c + stride];
      const uint16_t *left = &src[c - 1];
      const uint16_t *right = &src[c + 1];
      const int dx = (right[-stride] + 2 * right[0] + right[stride]) -
                     (left[-stride] + 2 * left[0] + left[stride]);
      const int dy = (below[-1] + 2 * below[0] + below[1]) -
                     (above[-1] + 2 * above[0] + above[1]);
      grad_info_sb[r * sb_width + c].is_dx_zero = (dx == 0);
      grad_info_sb[r * sb_width + c].abs_dx_abs_dy_sum =
          (uint16_t)(abs(dx) + abs(dy));
      // A bin index of -1 marks the dx == 0 case, which the HOG accumulation
      // resolves by splitting between the first and last bins.
      grad_info_sb[r * sb_width + c].hist_bin_idx =
          (dx != 0) ? get_hist_bin_idx(dx, dy) : -1;
    }
    src += stride;
  }
}
#endif  // CONFIG_AV1_HIGHBITDEPTH
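// Bit-depth dispatch: selects the low or high bit-depth HOG generator.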
static inline void generate_hog(const uint8_t *src8, int stride, int rows,
                                int cols, float *hist, int highbd) { … }
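// Bit-depth dispatch for filling the superblock gradient cache.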
static inline void compute_gradient_info_sb(MACROBLOCK *const x,
                                            BLOCK_SIZE sb_size,
                                            PLANE_TYPE plane) { … }
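// Whether the active speed features allow the per-pixel gradient cache to be
// computed once per superblock and reused by HOG-based pruning.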
static inline bool is_gradient_caching_for_hog_enabled(
    const AV1_COMP *const cpi) { … }
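// Fills the gradient cache for the planes of the given superblock when
// caching is enabled.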
static inline void produce_gradients_for_sb(AV1_COMP *cpi, MACROBLOCK *x,
                                            BLOCK_SIZE sb_size, int mi_row,
                                            int mi_col) { … }
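// Builds a block's HOG from the cached per-pixel gradient info instead of
// re-running the Sobel filters.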
static inline void generate_hog_using_gradient_cache(const MACROBLOCK *x,
                                                     int rows, int cols,
                                                     BLOCK_SIZE sb_size,
                                                     PLANE_TYPE plane,
                                                     float *hist) { … }
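// Collects the normalized HOG feature vector for a block: from the gradient
// cache when available, otherwise directly from source pixels.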
static inline void collect_hog_data(const MACROBLOCK *x, BLOCK_SIZE bsize,
                                    BLOCK_SIZE sb_size, int plane,
                                    float *hog) { … }
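// Runs the HOG features through the neural network above and sets entries of
// directional_mode_skip_mask for directions scoring below the threshold `th`.
// A sketch of the expected call pattern (exact thresholds and call sites are
// speed-feature dependent and live in the encoder sources):
//   uint8_t skip_mask[INTRA_MODES] = { 0 };
//   prune_intra_mode_with_hog(x, bsize, sb_size, th, skip_mask,
//                             /*is_chroma=*/0);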
static inline void prune_intra_mode_with_hog(
    const MACROBLOCK *x, BLOCK_SIZE bsize, BLOCK_SIZE sb_size, float th,
    uint8_t *directional_mode_skip_mask, int is_chroma) { … }
#undef BINS
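// Source-variance helper shared by the 4x4 sub-block caching below; `vf` is
// the block-size-specific variance function and `is_hbd` selects the high
// bit-depth path.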
int av1_calc_normalized_variance(aom_variance_fn_t vf, const uint8_t *const buf,
                                 const int stride, const int is_hbd);
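// Whether speed features allow caching the source variance of 4x4 sub-blocks
// at the superblock level.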
static inline bool is_src_var_for_4x4_sub_blocks_caching_enabled(
    const AV1_COMP *const cpi) { … }
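// Resets the cached 4x4 sub-block source-variance entries of a superblock so
// they are recomputed on first use.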
static inline void init_src_var_info_of_4x4_sub_blocks(
    const AV1_COMP *const cpi, Block4x4VarInfo *src_var_info_of_4x4_sub_blocks,
    const BLOCK_SIZE sb_size) { … }
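// Rate cost, in the encoder's fixed-point bit units, of signaling value `v`
// from an alphabet of `n` symbols with a quasi-uniform code.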
static inline int write_uniform_cost(int n, int v) { … }
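// Total rate cost of the luma intra mode side information: the mode itself
// plus, where applicable, palette, filter-intra and angle-delta signaling.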
static inline int intra_mode_info_cost_y(const AV1_COMP *cpi,
                                         const MACROBLOCK *x,
                                         const MB_MODE_INFO *mbmi,
                                         BLOCK_SIZE bsize, int mode_cost,
                                         int discount_color_cost) { … }
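// Chroma counterpart of intra_mode_info_cost_y(): UV mode plus palette and
// CfL-related signaling costs.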
static inline int intra_mode_info_cost_uv(const AV1_COMP *cpi,
                                          const MACROBLOCK *x,
                                          const MB_MODE_INFO *mbmi,
                                          BLOCK_SIZE bsize,
                                          int mode_cost) { … }
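// Cheap model-based RD estimate for one plane: generate the intra prediction,
// transform the residual (optionally with a Hadamard transform), and estimate
// rate and distortion from the transformed coefficients instead of fully
// coding the block.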
static int64_t intra_model_rd(const AV1_COMMON *cm, MACROBLOCK *const x,
                              int plane, BLOCK_SIZE plane_bsize,
                              TX_SIZE tx_size, int use_hadamard) { … }
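// Computes the model RD of the current luma mode, updates *best_model_rd when
// the mode improves on it, and returns nonzero when the mode is enough worse
// than the best seen so far to be pruned.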
static inline int model_intra_yrd_and_prune(const AV1_COMP *const cpi,
                                            MACROBLOCK *x, BLOCK_SIZE bsize,
                                            int64_t *best_model_rd) { … }
#ifdef __cplusplus
}  // extern "C"
#endif
#endif  // AOM_AV1_ENCODER_INTRA_MODE_SEARCH_UTILS_H_