#include <assert.h>
#include <inttypes.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <fp16/fp16.h>
#include "xnnpack.h"
#include "xnnpack/allocator.h"
#include "xnnpack/cache.h"
#include "xnnpack/common.h"
#include "xnnpack/compute.h"
#include "xnnpack/config-types.h"
#include "xnnpack/config.h"
#include "xnnpack/indirection.h"
#include "xnnpack/log.h"
#include "xnnpack/math.h"
#include "xnnpack/microfnptr.h"
#include "xnnpack/microkernel-type.h"
#include "xnnpack/microkernel-utils.h"
#include "xnnpack/microparams-init.h"
#include "xnnpack/microparams.h"
#include "xnnpack/operator-type.h"
#include "xnnpack/operator-utils.h"
#include "xnnpack/operator.h"
#include "xnnpack/pack.h"
#include "xnnpack/params.h"
#include "xnnpack/post-operation.h"
#include "pthreadpool.h"
#ifndef XNN_ENABLE_GEMM_M_SPECIALIZATION
#error "XNN_ENABLE_GEMM_M_SPECIALIZATION is not defined"
#endif
// Computes the output spatial extent for TensorFlow-style SAME padding from the
// input extent and the subsampling (stride) along that dimension.
// NOTE(review): body elided in this view — presumably divide_round_up(input, stride);
// confirm against the implementation.
static inline size_t compute_output_dimension_with_tf_same_padding(
size_t input_dimension,
size_t subsampling_dimension)
{ … }
// Searches the array of `num_ukernels` depthwise-convolution configs starting at
// `ukernel` for one suitable for the given `kernel_size`.
// Returns the selected config, or presumably NULL when none matches — body elided;
// confirm the no-match behavior before relying on it.
static inline const struct xnn_dwconv_config* find_dwconv_ukernel(
size_t kernel_size,
const struct xnn_dwconv_config* ukernel,
size_t num_ukernels)
{ … }
// Initializes `convolution_op` for the VMULCADDC path (1x1 depthwise convolution,
// i.e. per-channel multiply-add): packs `kernel`/`bias` via `pack_vmulcaddc_w`
// and stores `vmulcaddc_params` of `vmulcaddc_params_size` bytes on the operator.
// `log2_filter_element_size`/`bias_element_size` describe element widths for packing;
// `packed_weights_padding_byte` fills packed-buffer padding. `operator_type` is used
// presumably for error reporting. Returns an xnn_status error code on failure.
// NOTE(review): body elided — packing-buffer allocation/weights-cache interaction
// not visible here.
static enum xnn_status create_vmulcaddc_path(
uint32_t groups,
const void* kernel,
const void* bias,
uint32_t log2_filter_element_size,
uint32_t bias_element_size,
xnn_pack_vmulcaddc_w_fn pack_vmulcaddc_w,
const void* packing_params,
int packed_weights_padding_byte,
const void* vmulcaddc_params,
size_t vmulcaddc_params_size,
const struct xnn_vmulcaddc_config* vmulcaddc_config,
enum xnn_operator_type operator_type,
xnn_operator_t convolution_op)
{ … }
// Initializes `convolution_op` for the depthwise-convolution (DWCONV) path:
// packs weights with `pack_dwconv_hwg_w` or `pack_dwconv_ghw_w` (selection
// presumably depends on `flags`/layout — body elided, confirm), applies
// `init_scale_params` to `extra_weights_bytes` of per-channel scale data when
// provided, and stores `dwconv_params` (`dwconv_params_size` bytes) on the
// operator. On success, `*zero_size` receives the size of the zero buffer the
// caller must provision for padded input taps. Returns an xnn_status error code.
static enum xnn_status create_dwconv_path(
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t groups,
const void* kernel,
const void* bias,
uint32_t flags,
uint32_t log2_input_element_size,
uint32_t log2_filter_element_size,
uint32_t bias_element_size,
xnn_pack_dwconv_hwg_w_fn pack_dwconv_hwg_w,
xnn_pack_dwconv_ghw_w_fn pack_dwconv_ghw_w,
const void* packing_params,
int packed_weights_padding_byte,
size_t extra_weights_bytes,
xnn_init_qs8_qc8w_scale_params_fn init_scale_params,
const float* scale_params,
const void* dwconv_params,
size_t dwconv_params_size,
const struct xnn_dwconv_config* dwconv_ukernel,
bool linear_activation,
enum xnn_operator_type operator_type,
size_t* zero_size,
xnn_operator_t convolution_op)
{ … }
// Initializes `convolution_op` for the GEMM (1x1, unit stride) or IGEMM
// (general, indirection-based) path, selected by `ukernel_type`. Packs weights
// via `pack_gemm_goi_w` / `pack_conv_kgo_w` / `pack_conv_goki_w` (choice
// presumably depends on ukernel type and kernel_size — body elided, confirm),
// initializes per-channel scale data via `init_scale_params` /
// `init_kernel_scale_params`, stores `gemm_params` (`gemm_params_size` bytes),
// and records `num_post_operations` fused post-operations with their params.
// `jit_gemm_params` configures JIT code generation when enabled. On success,
// `*zero_size` receives the zero-buffer size needed for padded taps (IGEMM).
// Returns an xnn_status error code on failure.
static enum xnn_status create_gemm_or_igemm(
enum xnn_microkernel_type ukernel_type,
uint32_t kernel_size,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
const void* kernel,
const void* bias,
uint32_t flags,
uint32_t log2_input_element_size,
uint32_t log2_filter_element_size,
uint32_t bias_element_size,
xnn_packw_gemm_goi_ukernel_fn pack_gemm_goi_w,
xnn_pack_conv_kgo_w_fn pack_conv_kgo_w,
xnn_pack_conv_goki_w_fn pack_conv_goki_w,
const void* packing_params,
int packed_weights_padding_byte,
size_t extra_weights_bytes,
xnn_init_qs8_qc8w_scale_params_fn init_scale_params,
const float* scale_params,
xnn_init_qs8_qc8w_scale_params_fn init_kernel_scale_params,
const float* kernel_scale_params,
const void* gemm_params,
size_t gemm_params_size,
const struct xnn_gemm_config* gemm_config,
const struct jit_gemm_params* jit_gemm_params,
bool linear_activation,
bool relu_activation,
enum xnn_operator_type operator_type,
size_t num_post_operations,
void* post_operation_params,
xnn_operator_t convolution_op,
size_t* zero_size)
{ … }
// Shared implementation behind all public xnn_create_convolution2d_nhwc_* entry
// points. Validates the convolution geometry (padding, kernel size, stride,
// dilation, groups, channel counts/strides), allocates the operator, and
// dispatches to one of the specialized creation paths — presumably VMULCADDC
// for 1x1 depthwise, DWCONV for depthwise, and GEMM/IGEMM otherwise (body
// elided; confirm the dispatch criteria). All datatype-specific behavior is
// injected by the caller: element sizes (log2_*), packing functions, packing /
// microkernel params blobs, scale initializers, and the gemm/dwconv/vmulcaddc
// configs. `code_cache`/`weights_cache` enable JIT and packed-weight reuse.
// On success stores the new operator in `*convolution_op_out`.
static enum xnn_status create_convolution2d_nhwc(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_channel_stride,
size_t output_channel_stride,
const void* kernel,
const void* bias,
uint32_t flags,
uint32_t log2_input_element_size,
uint32_t log2_filter_element_size,
uint32_t bias_element_size,
xnn_pack_vmulcaddc_w_fn pack_vmulcaddc_w,
xnn_pack_dwconv_hwg_w_fn pack_dwconv_hwg_w,
xnn_pack_dwconv_ghw_w_fn pack_dwconv_ghw_w,
xnn_packw_gemm_goi_ukernel_fn pack_gemm_goi_w,
xnn_pack_conv_kgo_w_fn pack_conv_kgo_w,
xnn_pack_conv_goki_w_fn pack_conv_goki_w,
const void* packing_params,
int input_padding_byte,
int packed_weights_padding_byte,
size_t extra_weights_bytes,
xnn_init_qs8_qc8w_scale_params_fn init_scale_params,
const float* scale_params,
xnn_init_qs8_qc8w_scale_params_fn init_kernel_scale_params,
const float* kernel_scale_params,
const void* gemm_params,
size_t gemm_params_size,
const void* dwconv_params,
size_t dwconv_params_size,
const void* vmulcaddc_params,
size_t vmulcaddc_params_size,
const struct xnn_gemm_config* gemm_config,
const struct xnn_dwconv_config* dwconv_ukernel,
const struct xnn_vmulcaddc_config* vmulcaddc_config,
struct jit_gemm_params* jit_gemm_params,
bool linear_activation,
bool relu_activation,
enum xnn_operator_type operator_type,
size_t num_post_operations,
void* post_operation_params,
bool dynamic_quantization,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* convolution_op_out)
{ … }
// Public API: creates a 2D NHWC convolution with dynamically-quantized int8
// input (qd8), f16 output, and per-channel-quantized int8 weights (qc8w) —
// per the operator-name convention; `kernel_scale` supplies one scale per
// output channel, and `output_min`/`output_max` clamp the f16 result.
// Presumably a thin wrapper over create_convolution2d_nhwc with
// datatype-specific packing/params — body elided.
enum xnn_status xnn_create_convolution2d_nhwc_qd8_f16_qc8w(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_channel_stride,
size_t output_channel_stride,
const float* kernel_scale,
const int8_t* kernel,
const float* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* convolution_op_out)
{ … }
// Public API: creates a 2D NHWC convolution with dynamically-quantized int8
// input (qd8), f32 output, and per-channel-quantized int8 weights (qc8w) —
// f32 analogue of the qd8_f16_qc8w creator above; same parameter contract,
// with `output_min`/`output_max` clamping the f32 result. Body elided;
// presumably delegates to create_convolution2d_nhwc.
enum xnn_status xnn_create_convolution2d_nhwc_qd8_f32_qc8w(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_channel_stride,
size_t output_channel_stride,
const float* kernel_scale,
const int8_t* kernel,
const float* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* convolution_op_out)
{ … }
// Public API: creates a 2D NHWC convolution for asymmetric uint8 quantization
// (qu8): explicit zero points and scales for input, kernel, and output, with
// int32 bias and uint8 output clamping range [output_min, output_max].
// Body elided; presumably validates the quantization parameters (scales > 0,
// requantization scale in range) before delegating to create_convolution2d_nhwc.
enum xnn_status xnn_create_convolution2d_nhwc_qu8(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_channel_stride,
size_t output_channel_stride,
uint8_t input_zero_point,
float input_scale,
uint8_t kernel_zero_point,
float kernel_scale,
const uint8_t* kernel,
const int32_t* bias,
uint8_t output_zero_point,
float output_scale,
uint8_t output_min,
uint8_t output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* convolution_op_out)
{ … }
// Public API: creates a 2D NHWC convolution for symmetric int8 quantization
// (qs8): a single `kernel_scale` for all channels (no kernel zero point in the
// signature, consistent with symmetric weights), int32 bias, int8 output range
// [output_min, output_max]. Body elided; presumably delegates to
// create_convolution2d_nhwc after validating the quantization parameters.
enum xnn_status xnn_create_convolution2d_nhwc_qs8(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_channel_stride,
size_t output_channel_stride,
int8_t input_zero_point,
float input_scale,
float kernel_scale,
const int8_t* kernel,
const int32_t* bias,
int8_t output_zero_point,
float output_scale,
int8_t output_min,
int8_t output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* convolution_op_out)
{ … }
// Public API: creates a 2D NHWC convolution for int8 input with per-channel
// weight quantization (qs8_qc8w) — differs from the qs8 variant above only in
// that `kernel_scale` is an array (presumably one scale per output channel,
// per the qc8w naming convention). Body elided; presumably delegates to
// create_convolution2d_nhwc.
enum xnn_status xnn_create_convolution2d_nhwc_qs8_qc8w(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_channel_stride,
size_t output_channel_stride,
int8_t input_zero_point,
float input_scale,
const float* kernel_scale,
const int8_t* kernel,
const int32_t* bias,
int8_t output_zero_point,
float output_scale,
int8_t output_min,
int8_t output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* convolution_op_out)
{ … }
// Public API: creates a 2D NHWC convolution for half-precision (f16) data.
// `kernel`/`bias` are void* since f16 storage has no standard C type;
// `output_min`/`output_max` are given as f32 and presumably converted to f16
// clamp bounds internally (body elided — fp16.h is included above for such
// conversions; confirm). Delegation to create_convolution2d_nhwc presumed.
enum xnn_status xnn_create_convolution2d_nhwc_f16(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_channel_stride,
size_t output_channel_stride,
const void* kernel,
const void* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* convolution_op_out)
{ … }
// Public API: creates a 2D NHWC convolution for single-precision (f32) data
// with output clamping to [output_min, output_max]. Body elided; presumably
// delegates to create_convolution2d_nhwc with f32 packing functions and
// minmax microkernel params.
enum xnn_status xnn_create_convolution2d_nhwc_f32(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_channel_stride,
size_t output_channel_stride,
const float* kernel,
const float* bias,
float output_min,
float output_max,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* convolution_op_out)
{ … }
// Public API: creates an f32 2D NHWC convolution with `num_post_operations`
// fused post-operations (`post_operations`) applied to the output instead of a
// simple min/max clamp. Body elided; presumably requires JIT support via
// `code_cache` to generate the fused epilogue — confirm before documenting as
// a hard requirement.
enum xnn_status xnn_create_fused_convolution2d_nhwc_f32(
uint32_t input_padding_top,
uint32_t input_padding_right,
uint32_t input_padding_bottom,
uint32_t input_padding_left,
uint32_t kernel_height,
uint32_t kernel_width,
uint32_t subsampling_height,
uint32_t subsampling_width,
uint32_t dilation_height,
uint32_t dilation_width,
uint32_t groups,
size_t group_input_channels,
size_t group_output_channels,
size_t input_channel_stride,
size_t output_channel_stride,
const float* kernel,
const float* bias,
size_t num_post_operations,
struct xnn_post_operation* post_operations,
uint32_t flags,
xnn_code_cache_t code_cache,
xnn_weights_cache_t weights_cache,
xnn_operator_t* convolution_op_out)
{ … }
// Returns true when the operator's current input dimensions differ from those
// used in the last reshape — presumably comparing cached height/width fields on
// the operator (body elided); used to decide whether indirection buffers must
// be rebuilt.
static inline bool input_size_changed(xnn_operator_t convolution_op)
{ … }
// Reshape step for the GEMM path: sets up the compute/microkernel parameters
// for the current batch and spatial size, sizing per-thread tiling for
// `num_threads`. Outputs the scratch requirements in `*workspace_size` /
// `*workspace_alignment`. Element widths are passed as log2 sizes;
// `extra_weights_elements_size` covers per-channel extras appended to the
// packed weights. Body elided.
static enum xnn_status reshape_gemm(
xnn_operator_t convolution_op,
uint32_t log2_input_element_size,
uint32_t log2_filter_element_size,
uint32_t extra_weights_elements_size,
uint32_t log2_output_element_size,
size_t* workspace_size,
size_t* workspace_alignment,
size_t num_threads)
{ … }
// Reshape step for the IGEMM path: like reshape_gemm but additionally
// (presumably) sizes/rebuilds the indirection buffer for the new input
// geometry — body elided. `dynamic_quantization` selects the qd8 variant of
// the setup. Outputs scratch requirements via `*workspace_size` /
// `*workspace_alignment`.
static enum xnn_status reshape_igemm(
xnn_operator_t convolution_op,
uint32_t log2_input_element_size,
uint32_t log2_filter_element_size,
uint32_t extra_weights_elements_size,
uint32_t log2_output_element_size,
bool dynamic_quantization,
size_t* workspace_size,
size_t* workspace_alignment,
size_t num_threads)
{ … }
// Reshape step for the depthwise (DWCONV) path. Note this variant takes
// log2_accumulator_element_size (for multipass accumulation buffers) where the
// gemm/igemm variants take filter/extra-weights sizes. Outputs scratch
// requirements via `*workspace_size` / `*workspace_alignment`. Body elided.
static enum xnn_status reshape_dwconv(
xnn_operator_t convolution_op,
uint32_t log2_input_element_size,
uint32_t log2_accumulator_element_size,
uint32_t log2_output_element_size,
size_t* workspace_size,
size_t* workspace_alignment,
size_t num_threads)
{ … }
// Reshape step for the VMULCADDC (1x1 depthwise multiply-add) path.
// Presumably needs no scratch memory but still reports requirements via
// `*workspace_size` / `*workspace_alignment` for interface uniformity —
// body elided, confirm.
static enum xnn_status reshape_vmulcaddc(
xnn_operator_t convolution_op,
uint32_t log2_input_element_size,
uint32_t log2_output_element_size,
size_t* workspace_size,
size_t* workspace_alignment,
size_t num_threads)
{ … }
// Shared implementation behind all public xnn_reshape_convolution2d_nhwc_*
// entry points. Verifies the operator matches `expected_operator_type`,
// computes the output spatial dimensions for the new batch/input size
// (reported via optional `output_height_out`/`output_width_out`), and
// dispatches to reshape_gemm / reshape_igemm / reshape_dwconv /
// reshape_vmulcaddc according to the operator's microkernel type (dispatch
// presumed from the helpers above — body elided). Scratch requirements are
// returned through `workspace_size`/`workspace_alignment`; `threadpool`
// supplies the thread count for tiling.
static enum xnn_status reshape_convolution2d_nhwc(
xnn_operator_t convolution_op,
enum xnn_operator_type expected_operator_type,
size_t batch_size,
size_t input_height,
size_t input_width,
uint32_t log2_input_element_size,
uint32_t log2_filter_element_size,
uint32_t log2_accumulator_element_size,
uint32_t extra_weights_elements_size,
uint32_t log2_output_element_size,
bool dynamic_quantization,
size_t* workspace_size,
size_t* workspace_alignment,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool)
{ … }
// Public API: reshapes a qd8_f16_qc8w convolution for a new batch/input size.
// Presumably forwards to reshape_convolution2d_nhwc with qd8/f16-specific
// element sizes — body elided.
enum xnn_status xnn_reshape_convolution2d_nhwc_qd8_f16_qc8w(
xnn_operator_t convolution_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t* workspace_size,
size_t* workspace_alignment,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool)
{ … }
// Public API: reshapes a qd8_f32_qc8w convolution for a new batch/input size.
// Presumably forwards to reshape_convolution2d_nhwc with qd8/f32-specific
// element sizes — body elided.
enum xnn_status xnn_reshape_convolution2d_nhwc_qd8_f32_qc8w(
xnn_operator_t convolution_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t* workspace_size,
size_t* workspace_alignment,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool)
{ … }
// Public API: reshapes a qu8 convolution for a new batch/input size.
// Presumably forwards to reshape_convolution2d_nhwc with uint8 element
// sizes — body elided.
enum xnn_status xnn_reshape_convolution2d_nhwc_qu8(
xnn_operator_t convolution_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t* workspace_size,
size_t* workspace_alignment,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool)
{ … }
// Public API: reshapes a qs8 convolution for a new batch/input size.
// Presumably forwards to reshape_convolution2d_nhwc with int8 element
// sizes — body elided.
enum xnn_status xnn_reshape_convolution2d_nhwc_qs8(
xnn_operator_t convolution_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t* workspace_size,
size_t* workspace_alignment,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool)
{ … }
// Public API: reshapes a qs8_qc8w (per-channel-quantized weights) convolution
// for a new batch/input size. Presumably forwards to
// reshape_convolution2d_nhwc — body elided.
enum xnn_status xnn_reshape_convolution2d_nhwc_qs8_qc8w(
xnn_operator_t convolution_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t* workspace_size,
size_t* workspace_alignment,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool)
{ … }
// Public API: reshapes an f16 convolution for a new batch/input size.
// Presumably forwards to reshape_convolution2d_nhwc with half-precision
// element sizes — body elided.
enum xnn_status xnn_reshape_convolution2d_nhwc_f16(
xnn_operator_t convolution_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t* workspace_size,
size_t* workspace_alignment,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool)
{ … }
// Public API: reshapes an f32 convolution for a new batch/input size.
// Presumably forwards to reshape_convolution2d_nhwc with single-precision
// element sizes — body elided.
enum xnn_status xnn_reshape_convolution2d_nhwc_f32(
xnn_operator_t convolution_op,
size_t batch_size,
size_t input_height,
size_t input_width,
size_t* workspace_size,
size_t* workspace_alignment,
size_t* output_height_out,
size_t* output_width_out,
pthreadpool_t threadpool)
{ … }
// Setup step for the GEMM path: wires the input/output pointers recorded on
// the operator into the compute parameters prepared by reshape_gemm.
// Takes no workspace, unlike setup_igemm/setup_dwconv below. Body elided.
static enum xnn_status setup_gemm(xnn_operator_t convolution_op)
{ … }
// Setup step for the IGEMM path: binds input/output pointers and the caller-
// provided `workspace` (presumably holding the indirection buffer sized during
// reshape — body elided, confirm) into the compute parameters.
static enum xnn_status setup_igemm(
xnn_operator_t convolution_op,
void* workspace,
uint32_t log2_input_element_size)
{ … }
// Setup step for the depthwise (DWCONV) path: binds input/output pointers and
// the caller-provided `workspace` into the compute parameters prepared by
// reshape_dwconv. Body elided.
static enum xnn_status setup_dwconv(
xnn_operator_t convolution_op,
void* workspace,
uint32_t log2_input_element_size)
{ … }
// Setup step for the VMULCADDC path: binds input/output pointers into the
// compute parameters; takes no workspace, like setup_gemm. Body elided.
static enum xnn_status setup_vmulcaddc(xnn_operator_t convolution_op)
{ … }
// Shared implementation behind all public xnn_setup_convolution2d_nhwc_*
// entry points. Verifies the operator type, stores the `input`/`output` (and,
// for dynamically-quantized variants, `quantization_params`) pointers on the
// operator, then dispatches to setup_gemm / setup_igemm / setup_dwconv /
// setup_vmulcaddc per the operator's path (dispatch presumed from the helpers
// above — body elided). `workspace` must satisfy the size/alignment reported
// by the preceding reshape call.
static enum xnn_status setup_convolution2d_nhwc(
xnn_operator_t convolution_op,
enum xnn_operator_type expected_operator_type,
void* workspace,
const void* input,
void* output,
const void* quantization_params,
uint32_t log2_input_element_size)
{ … }
// Public API: binds buffers for a qd8_f16_qc8w convolution; `quantization_params`
// carries the per-batch dynamic-quantization scales/zero-points. Presumably
// forwards to setup_convolution2d_nhwc — body elided.
enum xnn_status xnn_setup_convolution2d_nhwc_qd8_f16_qc8w(
xnn_operator_t convolution_op,
void* workspace,
const int8_t* input,
void* output,
const struct xnn_dynamic_quantization_params* quantization_params)
{ … }
// Public API: binds buffers for a qd8_f32_qc8w convolution; f32 analogue of
// the qd8_f16_qc8w setup above. Presumably forwards to
// setup_convolution2d_nhwc — body elided.
enum xnn_status xnn_setup_convolution2d_nhwc_qd8_f32_qc8w(
xnn_operator_t convolution_op,
void* workspace,
const int8_t* input,
float* output,
const struct xnn_dynamic_quantization_params* quantization_params)
{ … }
// Public API: binds buffers for a qu8 (uint8) convolution. Presumably forwards
// to setup_convolution2d_nhwc with NULL quantization params — body elided.
enum xnn_status xnn_setup_convolution2d_nhwc_qu8(
xnn_operator_t convolution_op,
void* workspace,
const uint8_t* input,
uint8_t* output)
{ … }
// Public API: binds buffers for a qs8 (int8) convolution. Presumably forwards
// to setup_convolution2d_nhwc — body elided.
enum xnn_status xnn_setup_convolution2d_nhwc_qs8(
xnn_operator_t convolution_op,
void* workspace,
const int8_t* input,
int8_t* output)
{ … }
// Public API: binds buffers for a qs8_qc8w (per-channel weights) convolution.
// Presumably forwards to setup_convolution2d_nhwc — body elided.
enum xnn_status xnn_setup_convolution2d_nhwc_qs8_qc8w(
xnn_operator_t convolution_op,
void* workspace,
const int8_t* input,
int8_t* output)
{ … }
// Public API: binds buffers for an f16 convolution (void* buffers because f16
// has no standard C type). Presumably forwards to setup_convolution2d_nhwc —
// body elided.
enum xnn_status xnn_setup_convolution2d_nhwc_f16(
xnn_operator_t convolution_op,
void* workspace,
const void* input,
void* output)
{ … }
// Public API: binds buffers for an f32 convolution. Presumably forwards to
// setup_convolution2d_nhwc — body elided.
enum xnn_status xnn_setup_convolution2d_nhwc_f32(
xnn_operator_t convolution_op,
void* workspace,
const float* input,
float* output)
{ … }