#include <fp16/fp16.h>
#include <assert.h>
#include <float.h>
#include <fxdiv.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
// Fallback definition of ln(2). M_LN2 is provided by POSIX <math.h> but is
// not part of strict ISO C, so some toolchains (e.g. MSVC without
// _USE_MATH_DEFINES) do not define it. The value below matches the POSIX
// definition to full long-double precision; the elided placeholder that was
// here previously would not compile if the macro were ever expanded.
#ifndef M_LN2
#define M_LN2 0.693147180559945309417232121458176568
#endif
#include "xnnpack/argmaxpool.h"
#include "xnnpack/avgpool.h"
#include "xnnpack/common.h"
#include "xnnpack/conv.h"
#include "xnnpack/dwconv.h"
#include "xnnpack/fill.h"
#include "xnnpack/gavgpool.h"
#include "xnnpack/gemm.h"
#include "xnnpack/ibilinear.h"
#include "xnnpack/igemm.h"
#include "xnnpack/lut.h"
#include "xnnpack/math.h"
#include "xnnpack/maxpool.h"
#include "xnnpack/microparams.h"
#include "xnnpack/packq.h"
#include "xnnpack/packw.h"
#include "xnnpack/pad.h"
#include "xnnpack/pavgpool.h"
#include "xnnpack/prelu.h"
#include "xnnpack/raddstoreexpminusmax.h"
#include "xnnpack/reduce.h"
#include "xnnpack/simd/f32-scalar.h"
#include "xnnpack/simd/s32-scalar.h"
#include "xnnpack/spmm.h"
#include "xnnpack/transpose.h"
#include "xnnpack/unaligned.h"
#include "xnnpack/unpool.h"
#include "xnnpack/vbinary.h"
#include "xnnpack/vcvt.h"
#include "xnnpack/vlrelu.h"
#include "xnnpack/vmulcaddc.h"
#include "xnnpack/vunary.h"
#include "xnnpack/zip.h"
void xnn_f16_f32_vcvt_ukernel__scalar_u1(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f16_f32_vcvt_ukernel__scalar_u4(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f16_qs8_vcvt_ukernel__scalar_imagic_u4(
size_t batch,
const void* input,
int8_t* output,
const union xnn_f16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f16_rmax_ukernel__scalar_u2_acc2(
size_t batch,
const void* input,
void* output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f16_rminmax_ukernel__scalar_u2_acc2(
size_t batch,
const void* input,
void* output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_argmaxpool_ukernel_4x__scalar_c1(
size_t output_pixels,
size_t pooling_elements,
size_t channels,
const float** input,
size_t input_offset,
float* output,
uint32_t* index,
size_t input_increment,
size_t output_increment)
{ … }
void xnn_f32_argmaxpool_ukernel_9p8x__scalar_c1(
size_t output_pixels,
size_t pooling_elements,
size_t channels,
const float** input,
size_t input_offset,
float* accumulation_buffer,
uint32_t* index_buffer,
float* output,
uint32_t* index,
size_t input_increment,
size_t output_increment)
{ … }
void xnn_f32_argmaxpool_ukernel_9x__scalar_c1(
size_t output_pixels,
size_t pooling_elements,
size_t channels,
const float** input,
size_t input_offset,
float* output,
uint32_t* index,
size_t input_increment,
size_t output_increment)
{ … }
void xnn_f32_avgpool_minmax_ukernel_9p8x__scalar_c1(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
float* buffer,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_avgpool_minmax_ukernel_9x__scalar_c1(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__scalar_1x1(
size_t input_height,
size_t input_width,
size_t output_y_start,
size_t output_y_end,
const float* input,
const float* zero,
const float* weights,
float* output,
size_t input_padding_top,
size_t output_channels,
size_t output_height_stride,
size_t output_channel_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_dwconv_minmax_ukernel_25p1c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_dwconv_ukernel_25p1c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_dwconv_minmax_ukernel_2f2m2l4c1s1r__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_dwconv_minmax_ukernel_3p1c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_dwconv_ukernel_3p1c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_dwconv_minmax_ukernel_4p1c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_dwconv_ukernel_4p1c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_dwconv_minmax_ukernel_9p1c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_dwconv_ukernel_9p1c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_2x1_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_4x1(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__scalar_1x1_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__scalar_2x1_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc5(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_2x1_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc5(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_2x1_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_f16_vcvt_ukernel__scalar_bitcast_u4(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_f16_vcvt_ukernel__scalar_fabsf_u2(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_gavgpool_cw_ukernel__scalar_u1(
size_t elements,
size_t channels,
const float* input,
float* output,
const union xnn_f32_gavgpool_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_gavgpool_minmax_ukernel_7p7x__scalar_c1(
size_t rows,
size_t channels,
const float* input,
size_t input_stride,
const float* zero,
float* buffer,
float* output,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_gavgpool_minmax_ukernel_7x__scalar_c1(
size_t rows,
size_t channels,
const float* input,
size_t input_stride,
const float* zero,
float* output,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_gemm_minmax_ukernel_1x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_gemm_relu_ukernel_1x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_gemm_ukernel_1x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_gemm_minmax_ukernel_2x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_gemm_relu_ukernel_2x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_gemm_ukernel_2x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_gemm_minmax_ukernel_4x2__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_gemm_ukernel_4x2__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_gemm_minmax_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_gemm_relu_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_gemm_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_ibilinear_chw_ukernel__scalar_p4(
size_t output_pixels,
size_t channels,
const float** restrict input,
size_t input_offset,
const float* restrict weights,
float* restrict output,
size_t input_increment)
{ … }
void xnn_f32_ibilinear_ukernel__scalar_c2(
size_t output_pixels,
size_t channels,
const float** restrict input,
size_t input_offset,
const float* restrict weights,
float* restrict output,
size_t output_increment)
{ … }
void xnn_f32_igemm_minmax_ukernel_1x4__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_igemm_relu_ukernel_1x4__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_igemm_ukernel_1x4__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_igemm_minmax_ukernel_2x4__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_igemm_relu_ukernel_2x4__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_igemm_ukernel_2x4__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_igemm_minmax_ukernel_4x2__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_igemm_ukernel_4x2__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_igemm_minmax_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_igemm_relu_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_igemm_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_maxpool_minmax_ukernel_9p8x__scalar_c1(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_pavgpool_minmax_ukernel_9p8x__scalar_c1(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
const float* multiplier,
float* buffer,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_pavgpool_minmax_ukernel_9x__scalar_c1(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
const float* multiplier,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_prelu_ukernel__scalar_2x4(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride)
{ … }
void xnn_f32_qc4w_gemm_minmax_ukernel_1x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_qc4w_gemm_minmax_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_qc8w_gemm_minmax_ukernel_1x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_qc8w_gemm_minmax_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_qs8_vcvt_ukernel__scalar_imagic_u4(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_qs8_vcvt_ukernel__scalar_lrintf_u4(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_qu8_vcvt_ukernel__scalar_imagic_u1(
size_t batch,
const float* input,
uint8_t* output,
const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_qu8_vcvt_ukernel__scalar_imagic_u4(
size_t batch,
const float* input,
uint8_t* output,
const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_qu8_vcvt_ukernel__scalar_lrintf_u4(
size_t batch,
const float* input,
uint8_t* output,
const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_u4_acc2(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_rdsum_ukernel_7p7x__scalar_c4(
size_t rows,
size_t channels,
const float* input,
size_t input_stride,
const float* zero,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_rmax_ukernel__scalar_u4_acc4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_rminmax_ukernel__scalar_u4_acc4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_rsum_ukernel__scalar_u4_acc4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_spmm_minmax_ukernel_8x1__scalar(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_spmm_minmax_ukernel_8x2__scalar(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_spmm_minmax_ukernel_8x4__scalar(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vadd_minmax_ukernel__scalar_u8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vaddc_minmax_ukernel__scalar_u8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vdiv_minmax_ukernel__scalar_u2(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vdivc_minmax_ukernel__scalar_u2(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vmax_ukernel__scalar_u8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vmaxc_ukernel__scalar_u8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vmin_ukernel__scalar_u8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vminc_ukernel__scalar_u8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vmul_minmax_ukernel__scalar_u8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vmulc_minmax_ukernel__scalar_u8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vrdivc_minmax_ukernel__scalar_u2(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vrsubc_minmax_ukernel__scalar_u8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vsqrdiff_ukernel__scalar_u8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vsqrdiffc_ukernel__scalar_u8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vsub_minmax_ukernel__scalar_u8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vsubc_minmax_ukernel__scalar_u8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vclamp_ukernel__scalar_u4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vcmul_ukernel__scalar_u4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_u2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_u4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vhswish_ukernel__scalar_u4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vlrelu_ukernel__scalar_u4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vmulcaddc_minmax_ukernel_c1__scalar_2x(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vrelu_ukernel__scalar_u8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vrndd_ukernel__scalar_libm_u1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vrndd_ukernel__scalar_libm_u4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vrndne_ukernel__scalar_libm_u1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vrndne_ukernel__scalar_libm_u4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vrndu_ukernel__scalar_libm_u1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vrndu_ukernel__scalar_libm_u4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vrndz_ukernel__scalar_libm_u1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vrndz_ukernel__scalar_libm_u4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vrsqrt_ukernel__scalar_rsqrt_u1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_rsqrt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vrsqrt_ukernel__scalar_rsqrt_u4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_rsqrt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__scalar_rr2_lut64_p2_div_u2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_f32_vsqrt_ukernel__scalar_sqrt_u1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_qd8_f32_qb4w_gemm_minmax_ukernel_1x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qb4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_qd8_f32_qb4w_gemm_minmax_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qb4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
void xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// ---------------------------------------------------------------------------
// QD8 x QC8W GEMM/IGEMM scalar microkernels: dynamically quantized int8
// activations (qd8) times per-channel quantized int8 weights (qc8w), with
// float output and a min/max clamp (bodies elided in this view).
// Naming (XNNPACK convention): ..._ukernel_<M>x<N>__scalar — MxN is presumably
// the output tile handled per iteration; confirm against the kernel bodies.
// Common parameters:
//   mr, nc, kc          - A-tile rows, output channels, reduction depth
//   a (+ a_stride)      - int8 activations (direct matrix for GEMM;
//                         indirection pointer array for IGEMM)
//   w                   - packed weights, opaque layout from a packing kernel
//   c, cm/cn_stride     - float output and its row / column-group strides
//   params              - f32 min/max clamp parameters
//   quantization_params - per-row dynamic-quantization data (qd8)
// ---------------------------------------------------------------------------
// GEMM, 1x2 tile.
void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x2__scalar(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// GEMM, 1x4 tile.
void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// GEMM, 2x2 tile.
void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_2x2__scalar(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// GEMM, 4x4 tile.
void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// IGEMM (indirect GEMM) variants: `a` is an array of row pointers advanced in
// `ks` steps; `zero`/`zero_data` presumably substitute for padding rows and
// `a_offset` rebases the indirection pointers — confirm in kernel bodies.
// IGEMM, 1x2 tile.
void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_1x2__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const int8_t* zero_data,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// IGEMM, 1x4 tile.
void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_1x4__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const int8_t* zero_data,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// IGEMM, 2x2 tile.
void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_2x2__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const int8_t* zero_data,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// IGEMM, 4x4 tile.
void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const int8_t* zero_data,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Converts signed 16-bit quantized values to signed 8-bit quantized values,
// 4 elements per loop iteration (scalar). `batch` is presumably in bytes of
// the input type, per XNNPACK convention — confirm in the body (elided).
void xnn_qs16_qs8_vcvt_ukernel__scalar_u4(
size_t batch,
const int16_t* input,
int8_t* output,
const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// ---------------------------------------------------------------------------
// QS8 depthwise-convolution scalar microkernels with fp32 requantization.
// Naming (XNNPACK convention): <K>p<C>c = K kernel taps processed per pixel,
// C channels per iteration; suffix names the float->int rounding strategy
// (fmagic/imagic = magic-number bias tricks, lrintf = lrintf()) — bodies are
// elided here, so confirm against the kernels.
// `input` is an indirection array of row pointers; `zero` presumably points
// to a zero-valued padding row — TODO confirm.
// ---------------------------------------------------------------------------
void xnn_qs8_dwconv_minmax_fp32_ukernel_25p1c__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// 25-tap, 1 channel/iter, imagic rounding.
void xnn_qs8_dwconv_minmax_fp32_ukernel_25p1c__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// 25-tap, 2 channels/iter, lrintf rounding.
void xnn_qs8_dwconv_minmax_fp32_ukernel_25p2c__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// 9-tap, 1 channel/iter, fmagic rounding.
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p1c__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// 9-tap, 2 channels/iter, imagic rounding.
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p2c__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// 9-tap, 2 channels/iter, lrintf rounding.
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p2c__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// QS8 -> f32 dequantization, 1 element per iteration (scalar).
void xnn_qs8_f32_vcvt_ukernel__scalar_u1(
size_t batch,
const int8_t* input,
float* output,
const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// QS8 -> f32 dequantization, 4 elements per iteration.
void xnn_qs8_f32_vcvt_ukernel__scalar_u4(
size_t batch,
const int8_t* input,
float* output,
const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// ---------------------------------------------------------------------------
// QS8 global average pooling, scalar, fp32 requantization with imagic
// rounding. Naming: 7p7x = multi-pass (first pass of 7 rows, then 7-row
// increments, accumulating into `buffer`); 7x = single pass of up to 7 rows;
// cN = channels unrolled per iteration. Convention-based reading — bodies
// elided; confirm against kernels.
// ---------------------------------------------------------------------------
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c1(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Multi-pass, 4 channels/iter.
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c4(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Single-pass (<= 7 rows), 1 channel/iter — no intermediate buffer needed.
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c1(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Single-pass, 4 channels/iter.
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c4(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// ---------------------------------------------------------------------------
// QS8 depthwise convolution with per-channel (qc8w) weight quantization,
// scalar, fp32 requantization. Same signature shape as the plain qs8 dwconv
// kernels above but takes xnn_qs8_qc8w_conv_minmax_params (per-channel scales
// presumably live in the packed `weights` — confirm; bodies elided).
// <K>p<C>c = K taps, C channels per iteration; suffix = rounding strategy.
// ---------------------------------------------------------------------------
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_25p1c__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// 25-tap, 1 channel/iter, imagic rounding.
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_25p1c__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// 25-tap, 2 channels/iter, lrintf rounding.
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_25p2c__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// 3-tap, 1 channel/iter, fmagic rounding.
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_3p1c__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// 3-tap, 2 channels/iter, imagic rounding.
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_3p2c__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// 3-tap, 2 channels/iter, lrintf rounding.
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_3p2c__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// 9-tap, 1 channel/iter, fmagic rounding.
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_9p1c__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// 9-tap, 2 channels/iter, imagic rounding.
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_9p2c__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// 9-tap, 2 channels/iter, lrintf rounding.
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_9p2c__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// ---------------------------------------------------------------------------
// QS8 GEMM/IGEMM with per-channel weight quantization (qc8w), scalar, fp32
// requantization, int8 output. <M>x<N> = output tile; suffix = float->int
// rounding strategy. Bodies elided; tile reading is from XNNPACK naming
// convention.
// ---------------------------------------------------------------------------
// GEMM, 1x2 tile, imagic rounding.
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x2__scalar_imagic(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// GEMM, 1x4 tile, lrintf rounding.
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// GEMM, 2x2 tile, imagic rounding.
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x2__scalar_imagic(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// GEMM, 3x4 tile, lrintf rounding.
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// IGEMM variants: indirect activations via pointer array `a`, `ks` steps,
// `zero` presumably the padding row — confirm in bodies.
// IGEMM, 1x2 tile, imagic rounding.
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x2__scalar_imagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// IGEMM, 1x4 tile, lrintf rounding.
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// IGEMM, 2x2 tile, imagic rounding.
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x2__scalar_imagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// IGEMM, 3x4 tile, lrintf rounding.
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// ---------------------------------------------------------------------------
// QS8 element-wise scalar microkernels. uN suffix = N elements per loop
// iteration. The `c` variants (vaddc/vmulc) presumably broadcast a single
// scalar from input_b against input_a — convention-based; bodies elided.
// ---------------------------------------------------------------------------
// Quantized int8 addition with output clamping, 1 element/iter.
void xnn_qs8_vadd_minmax_ukernel__scalar_u1(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Quantized int8 addition, 4 elements/iter.
void xnn_qs8_vadd_minmax_ukernel__scalar_u4(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Addition with broadcast scalar operand, 1 element/iter.
void xnn_qs8_vaddc_minmax_ukernel__scalar_u1(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Addition with broadcast scalar operand, 4 elements/iter.
void xnn_qs8_vaddc_minmax_ukernel__scalar_u4(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// QS8 -> QS8 requantization (scale/zero-point change), 1 element/iter.
void xnn_qs8_vcvt_ukernel__scalar_u1(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// QS8 -> QS8 requantization, 4 elements/iter.
void xnn_qs8_vcvt_ukernel__scalar_u4(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Leaky ReLU on quantized int8; "andxor" vs "select" name the branch-free
// technique used for the negative slope — confirm in bodies.
void xnn_qs8_vlrelu_ukernel__scalar_andxor_u4(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Leaky ReLU, select-based variant, 4 elements/iter.
void xnn_qs8_vlrelu_ukernel__scalar_select_u4(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Quantized int8 multiplication, fp32 requantization, 4 elements/iter.
void xnn_qs8_vmul_minmax_fp32_ukernel__scalar_u4(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Multiplication with broadcast scalar operand, 4 elements/iter.
void xnn_qs8_vmulc_minmax_fp32_ukernel__scalar_u4(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// ---------------------------------------------------------------------------
// QU8 (unsigned 8-bit quantized) average pooling, scalar, fp32 requantization.
// 9p8x = multi-pass (9 elements first pass, 8 per subsequent pass, int32
// partial sums in `buffer`); 9x = single pass for <= 9 kernel elements.
// Convention-based reading — bodies elided.
// ---------------------------------------------------------------------------
void xnn_qu8_avgpool_minmax_fp32_ukernel_9p8x__scalar_imagic_c1(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const uint8_t** input,
size_t input_offset,
const uint8_t* zero,
int32_t* buffer,
uint8_t* output,
size_t input_increment,
size_t output_increment,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Single-pass variant (<= 9 kernel elements), no intermediate buffer.
void xnn_qu8_avgpool_minmax_fp32_ukernel_9x__scalar_imagic_c1(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const uint8_t** input,
size_t input_offset,
const uint8_t* zero,
uint8_t* output,
size_t input_increment,
size_t output_increment,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// ---------------------------------------------------------------------------
// QU8 depthwise convolution, scalar, fp32 requantization. Same structure as
// the qs8 dwconv family; uint8 data and xnn_qu8_conv_minmax_params.
// ---------------------------------------------------------------------------
void xnn_qu8_dwconv_minmax_fp32_ukernel_25p1c__scalar_fmagic(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// 25-tap, 1 channel/iter, imagic rounding.
void xnn_qu8_dwconv_minmax_fp32_ukernel_25p1c__scalar_imagic(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// 25-tap, 2 channels/iter, lrintf rounding.
void xnn_qu8_dwconv_minmax_fp32_ukernel_25p2c__scalar_lrintf(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// 9-tap, 1 channel/iter, fmagic rounding.
void xnn_qu8_dwconv_minmax_fp32_ukernel_9p1c__scalar_fmagic(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// 9-tap, 2 channels/iter, imagic rounding.
void xnn_qu8_dwconv_minmax_fp32_ukernel_9p2c__scalar_imagic(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// 9-tap, 2 channels/iter, lrintf rounding.
void xnn_qu8_dwconv_minmax_fp32_ukernel_9p2c__scalar_lrintf(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// QU8 -> f32 dequantization, 1 element per iteration (scalar).
void xnn_qu8_f32_vcvt_ukernel__scalar_u1(
size_t batch,
const uint8_t* input,
float* output,
const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// QU8 -> f32 dequantization, 4 elements per iteration.
void xnn_qu8_f32_vcvt_ukernel__scalar_u4(
size_t batch,
const uint8_t* input,
float* output,
const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// QU8 global average pooling — unsigned counterpart of the qs8 gavgpool
// family above (7p7x multi-pass / 7x single-pass, cN channels per iter).
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c1(
size_t rows,
size_t channels,
const uint8_t* input,
size_t input_stride,
const uint8_t* zero,
int32_t* buffer,
uint8_t* output,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Multi-pass, 4 channels/iter.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c4(
size_t rows,
size_t channels,
const uint8_t* input,
size_t input_stride,
const uint8_t* zero,
int32_t* buffer,
uint8_t* output,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Single-pass (<= 7 rows), 1 channel/iter.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c1(
size_t rows,
size_t channels,
const uint8_t* input,
size_t input_stride,
const uint8_t* zero,
uint8_t* output,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Single-pass, 4 channels/iter.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c4(
size_t rows,
size_t channels,
const uint8_t* input,
size_t input_stride,
const uint8_t* zero,
uint8_t* output,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// ---------------------------------------------------------------------------
// QU8 GEMM/IGEMM scalar microkernels, fp32 requantization, uint8 output.
// Unsigned counterpart of the qs8_qc8w GEMM family; per-tensor quantization
// params via xnn_qu8_conv_minmax_params. Bodies elided.
// ---------------------------------------------------------------------------
// GEMM, 1x2 tile, imagic rounding.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_imagic(
size_t mr,
size_t nc,
size_t kc,
const uint8_t* restrict a,
size_t a_stride,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// GEMM, 1x4 tile, lrintf rounding.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
const uint8_t* restrict a,
size_t a_stride,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// GEMM, 2x2 tile, imagic rounding.
void xnn_qu8_gemm_minmax_fp32_ukernel_2x2__scalar_imagic(
size_t mr,
size_t nc,
size_t kc,
const uint8_t* restrict a,
size_t a_stride,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// GEMM, 3x4 tile, lrintf rounding.
void xnn_qu8_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
const uint8_t* restrict a,
size_t a_stride,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// IGEMM, 1x2 tile, imagic rounding (indirect activations, ks steps).
void xnn_qu8_igemm_minmax_fp32_ukernel_1x2__scalar_imagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// IGEMM, 1x4 tile, lrintf rounding.
void xnn_qu8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// IGEMM, 2x2 tile, imagic rounding.
void xnn_qu8_igemm_minmax_fp32_ukernel_2x2__scalar_imagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// IGEMM, 3x4 tile, lrintf rounding.
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// ---------------------------------------------------------------------------
// QU8 element-wise scalar microkernels — unsigned counterparts of the qs8
// element-wise family above (same uN / `c`-broadcast conventions).
// ---------------------------------------------------------------------------
// Quantized uint8 addition with clamping, 1 element/iter.
void xnn_qu8_vadd_minmax_ukernel__scalar_u1(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Quantized uint8 addition, 4 elements/iter.
void xnn_qu8_vadd_minmax_ukernel__scalar_u4(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Addition with broadcast scalar operand, 1 element/iter.
void xnn_qu8_vaddc_minmax_ukernel__scalar_u1(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Addition with broadcast scalar operand, 4 elements/iter.
void xnn_qu8_vaddc_minmax_ukernel__scalar_u4(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// QU8 -> QU8 requantization, 1 element/iter.
void xnn_qu8_vcvt_ukernel__scalar_u1(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// QU8 -> QU8 requantization, 4 elements/iter.
void xnn_qu8_vcvt_ukernel__scalar_u4(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Leaky ReLU, and/xor branch-free variant, 4 elements/iter.
void xnn_qu8_vlrelu_ukernel__scalar_andxor_u4(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Leaky ReLU, select-based variant, 4 elements/iter.
void xnn_qu8_vlrelu_ukernel__scalar_select_u4(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Quantized uint8 multiplication, fp32 requantization, 4 elements/iter.
void xnn_qu8_vmul_minmax_fp32_ukernel__scalar_u4(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Multiplication with broadcast scalar operand, 4 elements/iter.
void xnn_qu8_vmulc_minmax_fp32_ukernel__scalar_u4(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Bilinear interpolation over int8 pixels, 1 channel per iteration; `input`
// is an indirection array of 4 corner pointers per output pixel and `weights`
// holds fixed-point int16 interpolation coefficients — convention-based
// reading, bodies elided.
void xnn_s8_ibilinear_ukernel__scalar_c1(
size_t output_pixels,
size_t channels,
const int8_t** restrict input,
size_t input_offset,
const int16_t* restrict weights,
int8_t* restrict output,
size_t output_increment)
{ … }
// Int8 max pooling, multi-pass (9 elements first pass, 8 per subsequent
// pass), 1 channel per iteration, with output clamping.
void xnn_s8_maxpool_minmax_ukernel_9p8x__scalar_c1(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const int8_t** input,
size_t input_offset,
int8_t* output,
size_t input_increment,
size_t output_increment,
const union xnn_s8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Clamp int8 values to [min, max] from params, 4 elements/iter.
void xnn_s8_vclamp_ukernel__scalar_u4(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_s8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Bilinear interpolation over uint8 pixels, 1 channel per iteration.
void xnn_u8_ibilinear_ukernel__scalar_c1(
size_t output_pixels,
size_t channels,
const uint8_t** restrict input,
size_t input_offset,
const int16_t* restrict weights,
uint8_t* restrict output,
size_t output_increment)
{ … }
// Internal helper for lut32norm below: presumably sums t[x[i]] over the n
// inputs — body elided, confirm against the kernel.
static inline uint32_t compute_sum(
size_t n,
const uint8_t* x,
const uint32_t* t)
{ … }
// Table lookup (x -> t[x]) followed by normalization of the looked-up
// values into uint8 outputs y.
void xnn_u8_lut32norm_ukernel__scalar(
size_t n,
const uint8_t* x,
const uint32_t* t,
uint8_t* y)
{ … }
// Uint8 max pooling, multi-pass (9 + 8-per-pass), 1 channel/iter.
void xnn_u8_maxpool_minmax_ukernel_9p8x__scalar_c1(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const uint8_t** input,
size_t input_offset,
uint8_t* output,
size_t input_increment,
size_t output_increment,
const union xnn_u8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Reduce to the maximum uint8 value, 2 elements/iter; params unused.
void xnn_u8_rmax_ukernel__scalar_u2(
size_t batch,
const uint8_t* input,
uint8_t* output,
const void* params)
{ … }
// Clamp uint8 values to [min, max] from params, 4 elements/iter.
void xnn_u8_vclamp_ukernel__scalar_u4(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_u8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// ---------------------------------------------------------------------------
// Type-agnostic data-movement microkernels. The xN prefix is the element
// width in bits (x16/x24/x32/x64); these kernels move bits, not typed values.
// packw_gemm_goi: repacks GOI-layout (groups x output x input channels)
// weights plus optional bias into the layout GEMM kernels consume; nr/kr/sr
// are the packing tile parameters — convention-based, bodies elided.
// ---------------------------------------------------------------------------
void xnn_x16_packw_gemm_goi_ukernel_x64__scalar_int_u4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint16_t* weights,
const uint16_t* bias,
const void* scale,
uint16_t* packed_weights,
size_t extra_bytes,
const void* params)
{ … }
// 16-bit transpose, 2x4 block tile; XNN_OOB_READS marks deliberate
// out-of-bounds reads within the tile.
void xnn_x16_transposec_ukernel__2x4_scalar_int(
const uint16_t *input,
uint16_t * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// 24-bit (3-byte element) transpose, 1x2 block tile; void pointers since no
// native 24-bit C type exists.
void xnn_x24_transposec_ukernel__1x2_scalar(
const void *input,
void * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x24_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// 32-bit GOI weight packing, nr=2 variant.
void xnn_x32_packw_gemm_goi_ukernel_x2__scalar_float_u4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
const void* scale,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{ … }
// 32-bit GOI weight packing, nr=4 variant.
void xnn_x32_packw_gemm_goi_ukernel_x4__scalar_float_u4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
const void* scale,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{ … }
// 32-bit transpose, 2x4 block tile.
void xnn_x32_transposec_ukernel__2x4_scalar_int(
const uint32_t *input,
uint32_t * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// Max-unpooling: scatter `input` values to positions given by `index` within
// each output pointer's window, filling the rest with `fill`.
void xnn_x32_unpool_ukernel__scalar(
size_t kernel_elements,
size_t channels,
uint32_t fill,
const uint32_t* input,
const uint32_t* index,
uint32_t** output)
{ … }
// Interleave (zip) 2 contiguous 32-bit streams into one.
void xnn_x32_zip_x2_ukernel__scalar(
size_t n,
const uint32_t* input,
uint32_t* output)
{ … }
// Interleave 3 contiguous 32-bit streams.
void xnn_x32_zip_x3_ukernel__scalar(
size_t n,
const uint32_t* input,
uint32_t* output)
{ … }
// Interleave 4 contiguous 32-bit streams.
void xnn_x32_zip_x4_ukernel__scalar(
size_t n,
const uint32_t* input,
uint32_t* output)
{ … }
// Interleave a runtime-variable number m of 32-bit streams.
void xnn_x32_zip_xm_ukernel__scalar(
size_t n,
size_t m,
const uint32_t* input,
uint32_t* output)
{ … }
// 64-bit transpose, 4x2 block tile.
void xnn_x64_transposec_ukernel__4x2_scalar_int(
const uint64_t *input,
uint64_t * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// 256-entry byte table lookup: output[i] = table[input[i]], 4 elements/iter.
void xnn_x8_lut_ukernel__scalar_u4(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{ … }
// Packs an f32 left-hand-side matrix into the qp8 (quantized/packed) format
// consumed by qp8 GEMM kernels; mr/kr/sr are packing tile parameters and
// m_idx_start the starting row — convention-based, body elided.
void xnn_x8_packq_f32qp8_ukernel__scalar_u1(size_t m, size_t k, size_t mr,
size_t kr, size_t sr,
size_t m_idx_start,
const float* XNN_RESTRICT lhs,
size_t lhs_stride,
void* XNN_RESTRICT lhs_packed) { … }
// 8-bit GOI weight packing, nr=16 variant (int8 weights, uint32 bias).
void xnn_x8_packw_gemm_goi_ukernel_x16__scalar_int_u2(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const int8_t* weights,
const uint32_t* bias,
const void* scale,
int8_t* packed_weights,
size_t extra_bytes,
const void* params)
{ … }
// 8-bit GOI weight packing, nr=32 variant.
void xnn_x8_packw_gemm_goi_ukernel_x32__scalar_int_u2(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const int8_t* weights,
const uint32_t* bias,
const void* scale,
int8_t* packed_weights,
size_t extra_bytes,
const void* params)
{ … }
// 8-bit GOI weight packing, nr=4 variant.
void xnn_x8_packw_gemm_goi_ukernel_x4__scalar_int_u2(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const int8_t* weights,
const uint32_t* bias,
const void* scale,
int8_t* packed_weights,
size_t extra_bytes,
const void* params)
{ … }
// 8-bit GOI weight packing, nr=8 variant.
void xnn_x8_packw_gemm_goi_ukernel_x8__scalar_int_u2(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const int8_t* weights,
const uint32_t* bias,
const void* scale,
int8_t* packed_weights,
size_t extra_bytes,
const void* params)
{ … }
// 8-bit transpose, 2x4 block tile.
void xnn_x8_transposec_ukernel__2x4_scalar_int(
const uint8_t *input,
uint8_t * output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x8_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// Interleave (zip) 2 contiguous byte streams into one.
void xnn_x8_zip_x2_ukernel__scalar(
size_t n,
const uint8_t* input,
uint8_t* output)
{ … }
// Interleave 3 contiguous byte streams.
void xnn_x8_zip_x3_ukernel__scalar(
size_t n,
const uint8_t* input,
uint8_t* output)
{ … }
// Interleave 4 contiguous byte streams.
void xnn_x8_zip_x4_ukernel__scalar(
size_t n,
const uint8_t* input,
uint8_t* output)
{ … }
// Interleave a runtime-variable number m of byte streams.
void xnn_x8_zip_xm_ukernel__scalar(
size_t n,
size_t m,
const uint8_t* input,
uint8_t* output)
{ … }
// Raw byte copy (memcpy-backed); params unused.
void xnn_xx_copy_ukernel__scalar_memcpy(size_t batch, const void* input, void* output, const void* params) { … }
// Fill `rows` x `channels` bytes with a repeating 32-bit pattern,
// 16 bytes per iteration.
void xnn_xx_fill_ukernel__scalar_u16(
size_t rows,
size_t channels,
void* output,
size_t output_stride,
const uint32_t fill_pattern)
{ … }
// Row-wise copy with pre/post padding filled from a repeating 32-bit
// pattern, 16 bytes per iteration; p4 presumably refers to the padding
// unroll — confirm in body.
void xnn_xx_pad_ukernel_p4__scalar_u16(
size_t rows,
size_t channels,
size_t pre_padding,
size_t post_padding,
const void* input,
size_t input_stride,
void* output,
size_t output_stride,
const uint32_t fill_pattern) XNN_OOB_READS
{ … }
// Variable-element-size transpose, one element at a time via memcpy.
void xnn_xx_transposev_ukernel__1x1_scalar_memcpy(
const void* input,
void* output,
size_t input_row_stride,
size_t output_row_stride,
size_t input_element_stride,
size_t output_element_stride,
size_t element_size,
size_t block_width,
size_t block_height)
{ … }
// ---------------------------------------------------------------------------
// F32 unary/binary element-wise scalar microkernels. `batch` is presumably a
// byte count (XNNPACK convention); uN = elements per loop iteration.
// "rational_P_Q_div" suffixes name a degree-P/degree-Q rational polynomial
// approximation evaluated with a division — bodies elided, confirm there.
// ---------------------------------------------------------------------------
// |x|, 4 elements/iter.
void xnn_f32_vabs_ukernel__scalar_u4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// copysign(mag, sign), element-wise, 2 elements/iter.
void xnn_f32_vcopysign_ukernel__scalar_u2(
size_t batch,
const float* mag,
const float* sign,
float* output,
const union xnn_f32_default_params unused_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// copysign with broadcast operand (the `c` variant), 2 elements/iter.
void xnn_f32_vcopysignc_ukernel__scalar_u2(
size_t batch,
const float* mag,
const float* sign,
float* output,
const union xnn_f32_default_params unused_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// exp(x), 4 elements/iter.
void xnn_f32_vexp_ukernel__scalar_exp_u4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params unused_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// GELU via degree-12/10 rational approximation, 1 element/iter.
void xnn_f32_vgelu_ukernel__scalar_rational_12_10_div_u1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params unused_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Internal helper: extracts a signed exponent from a float SIMD value —
// presumably used by vlog below; body elided, confirm against the kernel.
static XNN_INLINE xnn_simd_f32_t xnn_signed_getexp_f32(xnn_simd_f32_t a) { … }
// log(x) via degree-3/3 rational approximation, 1 element/iter.
void xnn_f32_vlog_ukernel__scalar_rational_3_3_div_u1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params unused_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// -x, 4 elements/iter.
void xnn_f32_vneg_ukernel__scalar_u4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Reversed-operand broadcast copysign (sign first, mag second).
void xnn_f32_vrcopysignc_ukernel__scalar_u2(
size_t batch,
const float* sign,
const float* mag,
float* output,
const union xnn_f32_default_params unused_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// x*x, 4 elements/iter.
void xnn_f32_vsqr_ukernel__scalar_u4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// tanh(x) via degree-9/6 rational approximation, 1 element/iter.
void xnn_f32_vtanh_ukernel__scalar_rational_9_6_div_u1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_tanh_params unused_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Element-wise int32 multiplication, 2 elements/iter.
void xnn_s32_vmul_ukernel__scalar_u2(
size_t batch,
const int32_t* input_a,
const int32_t* input_b,
int32_t* output,
const union xnn_s32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Int32 multiplication with broadcast scalar operand, 2 elements/iter.
void xnn_s32_vmulc_ukernel__scalar_u2(
size_t batch,
const int32_t* input1,
const int32_t* input2,
int32_t* output,
const union xnn_s32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }