#include <assert.h>
#include "config/aom_config.h"
#include "aom_util/aom_pthread.h"
#if CONFIG_TFLITE
#include "tensorflow/lite/c/c_api.h"
#include "av1/encoder/deltaq4_model.c"
#endif
#include "av1/common/common_data.h"
#include "av1/common/enums.h"
#include "av1/common/idct.h"
#include "av1/common/reconinter.h"
#include "av1/encoder/allintra_vis.h"
#include "av1/encoder/encoder.h"
#include "av1/encoder/ethread.h"
#include "av1/encoder/hybrid_fwd_txfm.h"
#include "av1/encoder/model_rd.h"
#include "av1/encoder/rdopt_utils.h"
#define MB_WIENER_PRED_BLOCK_SIZE …
#define MB_WIENER_PRED_BUF_STRIDE …
void av1_alloc_mb_wiener_var_pred_buf(AV1_COMMON *cm, ThreadData *td) { … }
void av1_dealloc_mb_wiener_var_pred_buf(ThreadData *td) { … }
void av1_init_mb_wiener_var_buffer(AV1_COMP *cpi) { … }
static int64_t get_satd(AV1_COMP *const cpi, BLOCK_SIZE bsize, int mi_row,
int mi_col) { … }
static int64_t get_sse(AV1_COMP *const cpi, BLOCK_SIZE bsize, int mi_row,
int mi_col) { … }
static double get_max_scale(AV1_COMP *const cpi, BLOCK_SIZE bsize, int mi_row,
int mi_col) { … }
static int get_window_wiener_var(AV1_COMP *const cpi, BLOCK_SIZE bsize,
int mi_row, int mi_col) { … }
static int get_var_perceptual_ai(AV1_COMP *const cpi, BLOCK_SIZE bsize,
int mi_row, int mi_col) { … }
static int rate_estimator(const tran_low_t *qcoeff, int eob, TX_SIZE tx_size) { … }
void av1_calc_mb_wiener_var_row(AV1_COMP *const cpi, MACROBLOCK *x,
MACROBLOCKD *xd, const int mi_row,
int16_t *src_diff, tran_low_t *coeff,
tran_low_t *qcoeff, tran_low_t *dqcoeff,
double *sum_rec_distortion,
double *sum_est_rate, uint8_t *pred_buffer) { … }
static void calc_mb_wiener_var(AV1_COMP *const cpi, double *sum_rec_distortion,
double *sum_est_rate) { … }
static int64_t estimate_wiener_var_norm(AV1_COMP *const cpi,
const BLOCK_SIZE norm_block_size) { … }
static void automatic_intra_tools_off(AV1_COMP *cpi,
const double sum_rec_distortion,
const double sum_est_rate) { … }
static void ext_rate_guided_quantization(AV1_COMP *cpi) { … }
void av1_set_mb_wiener_variance(AV1_COMP *cpi) { … }
static int get_rate_guided_quantizer(AV1_COMP *const cpi, BLOCK_SIZE bsize,
int mi_row, int mi_col) { … }
int av1_get_sbq_perceptual_ai(AV1_COMP *const cpi, BLOCK_SIZE bsize, int mi_row,
int mi_col) { … }
void av1_init_mb_ur_var_buffer(AV1_COMP *cpi) { … }
#if CONFIG_TFLITE
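// Runs the TFLite delta-q model over each block of the luma plane. For every
// block, the samples are normalized to [0, 1], fed through the interpreter,
// and the two model outputs are stored in predicts0/predicts1. Returns 0 on
// success and 1 on any TFLite or allocation failure.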
static int model_predict(BLOCK_SIZE block_size, int num_cols, int num_rows,
int bit_depth, uint8_t *y_buffer, int y_stride,
float *predicts0, float *predicts1) {
TfLiteModel *model =
TfLiteModelCreate(av1_deltaq4_model_file, av1_deltaq4_model_fsize);
if (model == NULL) return 1;
  TfLiteInterpreterOptions *options = TfLiteInterpreterOptionsCreate();
  if (options == NULL) {
    TfLiteModelDelete(model);
    return 1;
  }
  TfLiteInterpreterOptionsSetNumThreads(options, 2);
TfLiteInterpreter *interpreter = TfLiteInterpreterCreate(model, options);
if (interpreter == NULL) {
TfLiteInterpreterOptionsDelete(options);
TfLiteModelDelete(model);
return 1;
}
  if (TfLiteInterpreterAllocateTensors(interpreter) != kTfLiteOk) {
    TfLiteInterpreterDelete(interpreter);
    TfLiteInterpreterOptionsDelete(options);
    TfLiteModelDelete(model);
    return 1;
  }
TfLiteTensor *input_tensor = TfLiteInterpreterGetInputTensor(interpreter, 0);
if (input_tensor == NULL) {
TfLiteInterpreterDelete(interpreter);
TfLiteInterpreterOptionsDelete(options);
TfLiteModelDelete(model);
return 1;
}
size_t input_size = TfLiteTensorByteSize(input_tensor);
float *input_data = aom_calloc(input_size, 1);
if (input_data == NULL) {
TfLiteInterpreterDelete(interpreter);
TfLiteInterpreterOptionsDelete(options);
TfLiteModelDelete(model);
return 1;
}
const int num_mi_w = mi_size_wide[block_size];
const int num_mi_h = mi_size_high[block_size];
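  // Process one block per iteration: convert the MI offsets to pixel offsets
  // (each MI unit is 4 pixels), normalize the luma samples by the maximum
  // sample value for the bit depth, run inference, and store the two model
  // outputs for this block.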
for (int row = 0; row < num_rows; ++row) {
for (int col = 0; col < num_cols; ++col) {
const int row_offset = (row * num_mi_h) << 2;
const int col_offset = (col * num_mi_w) << 2;
uint8_t *buf = y_buffer + row_offset * y_stride + col_offset;
int r = row_offset, pos = 0;
const float base = (float)((1 << bit_depth) - 1);
while (r < row_offset + (num_mi_h << 2)) {
for (int c = 0; c < (num_mi_w << 2); ++c) {
input_data[pos++] = bit_depth > 8
? (float)*CONVERT_TO_SHORTPTR(buf + c) / base
: (float)*(buf + c) / base;
}
buf += y_stride;
++r;
}
TfLiteTensorCopyFromBuffer(input_tensor, input_data, input_size);
      if (TfLiteInterpreterInvoke(interpreter) != kTfLiteOk) {
        aom_free(input_data);
        TfLiteInterpreterDelete(interpreter);
        TfLiteInterpreterOptionsDelete(options);
        TfLiteModelDelete(model);
        return 1;
      }
const TfLiteTensor *output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
      if (output_tensor == NULL) {
        aom_free(input_data);
        TfLiteInterpreterDelete(interpreter);
        TfLiteInterpreterOptionsDelete(options);
        TfLiteModelDelete(model);
        return 1;
      }
      const size_t output_size = TfLiteTensorByteSize(output_tensor);
      float output_data[2];
      // The model emits two floats per block; make sure the copy fits.
      assert(output_size == sizeof(output_data));
      TfLiteTensorCopyToBuffer(output_tensor, output_data, output_size);
predicts0[row * num_cols + col] = output_data[0];
predicts1[row * num_cols + col] = output_data[1];
}
}
TfLiteInterpreterDelete(interpreter);
TfLiteInterpreterOptionsDelete(options);
TfLiteModelDelete(model);
aom_free(input_data);
return 0;
}
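// Builds a per-superblock delta-q map from the TFLite model output: the first
// model output is averaged over the frame, rescaled around the configured
// cq_level, and written to cpi->mb_delta_q.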
void av1_set_mb_ur_variance(AV1_COMP *cpi) {
const AV1_COMMON *cm = &cpi->common;
const CommonModeInfoParams *const mi_params = &cm->mi_params;
uint8_t *y_buffer = cpi->source->y_buffer;
const int y_stride = cpi->source->y_stride;
  const BLOCK_SIZE block_size = cpi->common.seq_params->sb_size;
const uint32_t bit_depth = cpi->td.mb.e_mbd.bd;
const int num_mi_w = mi_size_wide[block_size];
const int num_mi_h = mi_size_high[block_size];
const int num_cols = (mi_params->mi_cols + num_mi_w - 1) / num_mi_w;
const int num_rows = (mi_params->mi_rows + num_mi_h - 1) / num_mi_h;
float *mb_delta_q0, *mb_delta_q1, delta_q_avg0 = 0.0f;
CHECK_MEM_ERROR(cm, mb_delta_q0,
aom_calloc(num_rows * num_cols, sizeof(float)));
CHECK_MEM_ERROR(cm, mb_delta_q1,
aom_calloc(num_rows * num_cols, sizeof(float)));
if (model_predict(block_size, num_cols, num_rows, bit_depth, y_buffer,
y_stride, mb_delta_q0, mb_delta_q1)) {
    aom_internal_error(cm->error, AOM_CODEC_ERROR,
                       "Failed to call TFLite functions.");
}
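  // Average the first model output over all superblocks in the frame.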
for (int row = 0; row < num_rows; ++row) {
for (int col = 0; col < num_cols; ++col) {
const int index = row * num_cols + col;
delta_q_avg0 += mb_delta_q0[index];
}
}
delta_q_avg0 /= (float)(num_rows * num_cols);
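  // Derive a scaling factor in [0, 1] that peaks when cq_level matches the
  // frame-average model output and shrinks the per-block deltas as cq_level
  // moves away from it.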
float scaling_factor;
const float cq_level = (float)cpi->oxcf.rc_cfg.cq_level / (float)MAXQ;
if (cq_level < delta_q_avg0) {
scaling_factor = cq_level / delta_q_avg0;
} else {
scaling_factor = 1.0f - (cq_level - delta_q_avg0) / (1.0f - delta_q_avg0);
}
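  // Convert each block's model output, centered on the frame average, into a
  // delta-q value scaled by deltaq_strength and the factor derived above.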
for (int row = 0; row < num_rows; ++row) {
for (int col = 0; col < num_cols; ++col) {
const int index = row * num_cols + col;
cpi->mb_delta_q[index] =
RINT((float)cpi->oxcf.q_cfg.deltaq_strength / 100.0f * (float)MAXQ *
scaling_factor * (mb_delta_q0[index] - delta_q_avg0));
}
}
aom_free(mb_delta_q0);
aom_free(mb_delta_q1);
}
#else
void av1_set_mb_ur_variance(AV1_COMP *cpi) { … }
#endif
int av1_get_sbq_user_rating_based(AV1_COMP *const cpi, int mi_row, int mi_col) { … }