#include <assert.h>
#include <stddef.h>
#include <stdint.h>
// ln(2). <math.h> defines M_LN2 only as a POSIX/BSD extension, so it can be
// missing in strict ISO C modes (e.g. MSVC without _USE_MATH_DEFINES).
// Provide the standard value as a fallback. (The value was elided in this
// extract; 0.69314718055994530942 is the canonical POSIX definition.)
#ifndef M_LN2
#define M_LN2 0.69314718055994530942
#endif
#include <immintrin.h>
#include "xnnpack/argmaxpool.h"
#include "xnnpack/avgpool.h"
#include "xnnpack/common.h"
#include "xnnpack/dwconv.h"
#include "xnnpack/fill.h"
#include "xnnpack/gavgpool.h"
#include "xnnpack/gemm.h"
#include "xnnpack/ibilinear.h"
#include "xnnpack/igemm.h"
#include "xnnpack/intrinsics-polyfill.h"
#include "xnnpack/math.h"
#include "xnnpack/maxpool.h"
#include "xnnpack/microparams.h"
#include "xnnpack/packw.h"
#include "xnnpack/pad.h"
#include "xnnpack/prelu.h"
#include "xnnpack/raddstoreexpminusmax.h"
#include "xnnpack/reduce.h"
#include "xnnpack/simd/f32-sse2.h"
#include "xnnpack/transpose.h"
#include "xnnpack/unaligned.h"
#include "xnnpack/unpool.h"
#include "xnnpack/vbinary.h"
#include "xnnpack/vcvt.h"
#include "xnnpack/vlrelu.h"
#include "xnnpack/vunary.h"
#include "xnnpack/zip.h"
// NOTE(review): all function bodies in this extract are elided ({ … }).
// The comments below are derived from the visible signatures and XNNPACK's
// kernel-naming convention (type__arch_variant_uN = N elements per loop
// iteration) — confirm against the full source.

// f16 -> f32 conversion (SSE2 integer-arithmetic variant, 32 elems/iter).
void xnn_f16_f32_vcvt_ukernel__sse2_int16_u32(
    size_t batch,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// f16 elementwise absolute value, 16 elems/iter.
void xnn_f16_vabs_ukernel__sse2_u16(
    size_t batch,
    const void* input,
    void* output,
    const union xnn_f16_abs_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// f16 elementwise negation, 16 elems/iter.
void xnn_f16_vneg_ukernel__sse2_u16(
    size_t batch,
    const void* input,
    void* output,
    const union xnn_f16_neg_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// f32 argmax pooling: emits both the max value and its index; up to 4 pooling
// elements per pixel, 4 channels per pass.
void xnn_f32_argmaxpool_ukernel_4x__sse2_c4(
    size_t output_pixels,
    size_t pooling_elements,
    size_t channels,
    const float** input,
    size_t input_offset,
    float* output,
    uint32_t* index,
    size_t input_increment,
    size_t output_increment) XNN_OOB_READS
{ … }
// f32 argmax pooling, multi-pass (9 elements first pass + 8 per extra pass),
// using caller-provided accumulation/index scratch buffers.
void xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4(
    size_t output_pixels,
    size_t pooling_elements,
    size_t channels,
    const float** input,
    size_t input_offset,
    float* accumulation_buffer,
    uint32_t* index_buffer,
    float* output,
    uint32_t* index,
    size_t input_increment,
    size_t output_increment) XNN_OOB_READS
{ … }
// f32 argmax pooling, single pass, up to 9 pooling elements, 4 channels/pass.
void xnn_f32_argmaxpool_ukernel_9x__sse2_c4(
    size_t output_pixels,
    size_t pooling_elements,
    size_t channels,
    const float** input,
    size_t input_offset,
    float* output,
    uint32_t* index,
    size_t input_increment,
    size_t output_increment) XNN_OOB_READS
{ … }
// f32 -> f16 conversion, 16 elems/iter.
void xnn_f32_f16_vcvt_ukernel__sse2_u16(
    size_t batch,
    const float* input,
    void* output,
    const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// f32 PReLU with per-channel slopes in `weights`; 2 rows x 8 channels/iter.
void xnn_f32_prelu_ukernel__sse2_2x8(
    size_t rows,
    size_t channels,
    const float* restrict input,
    size_t input_stride,
    const float* restrict weights,
    float* restrict output,
    size_t output_stride) XNN_OOB_READS
{ … }
// f32 -> qs8 quantizing conversion, 32 elems/iter.
void xnn_f32_qs8_vcvt_ukernel__sse2_u32(
    size_t batch,
    const float* input,
    int8_t* output,
    const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// f32 -> qu8 quantizing conversion, 32 elems/iter.
void xnn_f32_qu8_vcvt_ukernel__sse2_u32(
    size_t batch,
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// Softmax building block: stores exp(input - *max) and accumulates its sum
// into *sum (rr2 = 2-step range reduction, p5 = degree-5 polynomial,
// 20 elems/iter with 2 accumulators — per naming convention).
void xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_u20_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// 16-entry 2^(-k/16) lookup table, defined elsewhere in XNNPACK.
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
// f32 ELU (16-entry LUT + degree-3 polynomial), 12 elems/iter.
void xnn_f32_velu_ukernel__sse2_rr2_lut16_p3_u12(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// f32 round-down (toward -inf), 8 elems/iter.
void xnn_f32_vrndd_ukernel__sse2_u8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// f32 round-to-nearest-even, 8 elems/iter.
void xnn_f32_vrndne_ukernel__sse2_u8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// f32 round-up (toward +inf), 8 elems/iter.
void xnn_f32_vrndu_ukernel__sse2_u8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// f32 round-toward-zero (truncate), 8 elems/iter.
void xnn_f32_vrndz_ukernel__sse2_u8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// 64-entry 2^(-k/64) lookup table, defined elsewhere in XNNPACK.
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
// f32 sigmoid (64-entry LUT, degree-2 polynomial, division), 8 elems/iter.
void xnn_f32_vsigmoid_ukernel__sse2_rr2_lut64_p2_div_u8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// NOTE(review): bodies elided ({ … }) in this extract. Per XNNPACK naming:
// qd8 = dynamically-quantized int8 activations, qb4w/qc4w/qc8w = blockwise
// 4-bit / channelwise 4-bit / channelwise 8-bit weights, f32 output;
// MxNcK (e.g. 1x4c8) = M-row, 4-column tile with 8-deep K blocking;
// ld64/ld128 = 64/128-bit weight loads. Confirm against the full source.

// qd8 x blockwise-4-bit-weight GEMM, f32 output, 1x4 tile, 128-bit loads.
void xnn_qd8_f32_qb4w_gemm_minmax_ukernel_1x4c8__sse2_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_qb4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
    const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// As above, 4-row tile.
void xnn_qd8_f32_qb4w_gemm_minmax_ukernel_4x4c8__sse2_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_qb4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
    const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qd8 x channelwise-4-bit-weight GEMM, 1x4 tile, 128-bit loads.
void xnn_qd8_f32_qc4w_gemm_minmax_ukernel_1x4c8__sse2_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
    const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// As above, 4-row tile.
void xnn_qd8_f32_qc4w_gemm_minmax_ukernel_4x4c8__sse2_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
    const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qd8 x channelwise-8-bit-weight GEMM, 1x4 tile, 64-bit loads.
void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x4c8__sse2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
    const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// As above, 3-row tile.
void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_3x4c8__sse2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
    const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// Indirect GEMM variant: `a` is an array of row pointers (ks entries per
// output row); `zero`/`zero_data` substitute for out-of-bounds rows.
void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_1x4c8__sse2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const int8_t* zero_data,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
    const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// As above, 3-row tile.
void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_3x4c8__sse2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const int8_t* zero_data,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)],
    const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// NOTE(review): bodies elided ({ … }) in this extract; comments reflect the
// signatures and XNNPACK naming convention — confirm against the full source.

// qs16 -> qs8 requantizing conversion, 16 elems/iter.
void xnn_qs16_qs8_vcvt_ukernel__sse2_u16(
    size_t batch,
    const int16_t* input,
    int8_t* output,
    const union xnn_qs16_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qs8 depthwise convolution, 25-tap (e.g. 5x5), 8 channels/iter, fp32
// requantization; "mul16_add16" names the 16-bit multiply/accumulate path.
void xnn_qs8_dwconv_minmax_fp32_ukernel_25p8c__sse2_mul16_add16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// As above, 9-tap (e.g. 3x3).
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p8c__sse2_mul16_add16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qs8 -> f32 dequantizing conversion, 32 elems/iter.
void xnn_qs8_f32_vcvt_ukernel__sse2_u32(
    size_t batch,
    const int8_t* input,
    float* output,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qs8 global average pooling, multi-pass (7 rows first + 7 per extra pass)
// with int32 scratch `buffer`, 8 channels/iter.
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__sse2_c8(
    size_t rows,
    size_t channels,
    const int8_t* input,
    size_t input_stride,
    const int8_t* zero,
    int32_t* buffer,
    int8_t* output,
    const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qs8 global average pooling, single pass (up to 7 rows), 8 channels/iter.
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__sse2_c8(
    size_t rows,
    size_t channels,
    const int8_t* input,
    size_t input_stride,
    const int8_t* zero,
    int8_t* output,
    const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qs8 depthwise conv with channelwise-quantized weights, 25-tap, 8 ch/iter.
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_25p8c__sse2_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// As above, 3-tap.
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_3p8c__sse2_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// As above, 9-tap.
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_9p8c__sse2_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// NOTE(review): bodies elided ({ … }) in this extract; comments reflect the
// signatures and XNNPACK naming convention — confirm against the full source.

// qs8 GEMM, channelwise-8-bit weights, fp32 requantization, 1x4 tile,
// 8-deep K blocking, 64-bit loads.
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x4c8__sse2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// As above, 3-row tile.
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c8__sse2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// Indirect GEMM variant (row pointers in `a`, `zero` for padding rows).
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c8__sse2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// As above, 3-row tile.
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_3x4c8__sse2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qs8 saturating elementwise add, 8 elems/iter.
void xnn_qs8_vadd_minmax_ukernel__sse2_mul16_ld64_u8(
    size_t batch,
    const int8_t* input_a,
    const int8_t* input_b,
    int8_t* output,
    const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qs8 add with broadcast second operand ("c" suffix — per naming convention,
// input_b supplies a single scalar).
void xnn_qs8_vaddc_minmax_ukernel__sse2_mul16_ld64_u8(
    size_t batch,
    const int8_t* input_a,
    const int8_t* input_b,
    int8_t* output,
    const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qs8 -> qs8 requantizing conversion, 32 elems/iter.
void xnn_qs8_vcvt_ukernel__sse2_u32(
    size_t batch,
    const int8_t* input,
    int8_t* output,
    const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qs8 leaky ReLU, 32 elems/iter.
void xnn_qs8_vlrelu_ukernel__sse2_u32(
    size_t batch,
    const int8_t* input,
    int8_t* output,
    const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qs8 elementwise multiply with fp32 requantization, 8 elems/iter.
void xnn_qs8_vmul_minmax_fp32_ukernel__sse2_mul16_ld64_u8(
    size_t batch,
    const int8_t* input_a,
    const int8_t* input_b,
    int8_t* output,
    const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qs8 multiply with broadcast second operand.
void xnn_qs8_vmulc_minmax_fp32_ukernel__sse2_mul16_ld64_u8(
    size_t batch,
    const int8_t* input_a,
    const int8_t* input_b,
    int8_t* output,
    const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// NOTE(review): bodies elided ({ … }) in this extract; comments reflect the
// signatures and XNNPACK naming convention — confirm against the full source.

// qu8 average pooling, multi-pass (9 elements first pass + 8 per extra pass)
// with int32 scratch `buffer`, 8 channels/iter.
void xnn_qu8_avgpool_minmax_fp32_ukernel_9p8x__sse2_c8(
    size_t output_pixels,
    size_t kernel_elements,
    size_t channels,
    const uint8_t** input,
    size_t input_offset,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qu8 average pooling, single pass (up to 9 elements), 8 channels/iter.
void xnn_qu8_avgpool_minmax_fp32_ukernel_9x__sse2_c8(
    size_t output_pixels,
    size_t kernel_elements,
    size_t channels,
    const uint8_t** input,
    size_t input_offset,
    const uint8_t* zero,
    uint8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qu8 depthwise convolution, 25-tap, 8 channels/iter, fp32 requantization.
void xnn_qu8_dwconv_minmax_fp32_ukernel_25p8c__sse2_mul16(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// As above, 9-tap.
void xnn_qu8_dwconv_minmax_fp32_ukernel_9p8c__sse2_mul16(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qu8 -> f32 dequantizing conversion, 32 elems/iter.
void xnn_qu8_f32_vcvt_ukernel__sse2_u32(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// NOTE(review): bodies elided ({ … }) in this extract, and in this region the
// extraction fused each closing "}" onto the same line as the next signature
// ("}void …"); the definitions below are re-separated onto their own lines
// (token-for-token identical). Comments reflect the signatures and XNNPACK
// naming convention — confirm against the full source.

// qu8 global average pooling, multi-pass (7 rows + 7 per extra pass),
// int32 scratch `buffer`, 8 channels/iter.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__sse2_c8(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qu8 global average pooling, single pass (up to 7 rows), 8 channels/iter.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__sse2_c8(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qu8 GEMM, fp32 requantization, 1x4 tile, 8-deep K blocking, 64-bit loads.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c8__sse2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// As above, 3-row tile.
void xnn_qu8_gemm_minmax_fp32_ukernel_3x4c8__sse2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// Indirect GEMM variant (row pointers in `a`, `zero` for padding rows).
void xnn_qu8_igemm_minmax_fp32_ukernel_1x4c8__sse2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const uint8_t** restrict a,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// As above, 3-row tile.
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c8__sse2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const uint8_t** restrict a,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qu8 saturating elementwise add, 8 elems/iter.
void xnn_qu8_vadd_minmax_ukernel__sse2_mul16_ld64_u8(
    size_t batch,
    const uint8_t* input_a,
    const uint8_t* input_b,
    uint8_t* output,
    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qu8 add with broadcast second operand.
void xnn_qu8_vaddc_minmax_ukernel__sse2_mul16_ld64_u8(
    size_t batch,
    const uint8_t* input_a,
    const uint8_t* input_b,
    uint8_t* output,
    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qu8 -> qu8 requantizing conversion, 32 elems/iter.
void xnn_qu8_vcvt_ukernel__sse2_u32(
    size_t batch,
    const uint8_t* input,
    uint8_t* output,
    const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qu8 leaky ReLU, 32 elems/iter.
void xnn_qu8_vlrelu_ukernel__sse2_u32(
    size_t batch,
    const uint8_t* input,
    uint8_t* output,
    const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qu8 elementwise multiply with fp32 requantization, 8 elems/iter.
void xnn_qu8_vmul_minmax_fp32_ukernel__sse2_mul16_ld64_u8(
    size_t batch,
    const uint8_t* input_a,
    const uint8_t* input_b,
    uint8_t* output,
    const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// qu8 multiply with broadcast second operand.
void xnn_qu8_vmulc_minmax_fp32_ukernel__sse2_mul16_ld64_u8(
    size_t batch,
    const uint8_t* input_a,
    const uint8_t* input_b,
    uint8_t* output,
    const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// s8 bilinear interpolation with int16 fixed-point weights, 8 channels/iter.
void xnn_s8_ibilinear_ukernel__sse2_c8(
    size_t output_pixels,
    size_t channels,
    const int8_t** restrict input,
    size_t input_offset,
    const int16_t* restrict weights,
    int8_t* restrict output,
    size_t output_increment) XNN_OOB_READS
{ … }
// s8 max pooling, multi-pass (9 + 8 per extra pass), 16 channels/iter.
void xnn_s8_maxpool_minmax_ukernel_9p8x__sse2_c16(
    size_t output_pixels,
    size_t kernel_elements,
    size_t channels,
    const int8_t** input,
    size_t input_offset,
    int8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union xnn_s8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// s8 clamp to [min, max], 64 elems/iter.
void xnn_s8_vclamp_ukernel__sse2_u64(
    size_t batch,
    const int8_t* input,
    int8_t* output,
    const union xnn_s8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// u8 bilinear interpolation with int16 fixed-point weights, 8 channels/iter.
void xnn_u8_ibilinear_ukernel__sse2_c8(
    size_t output_pixels,
    size_t channels,
    const uint8_t** restrict input,
    size_t input_offset,
    const int16_t* restrict weights,
    uint8_t* restrict output,
    size_t output_increment) XNN_OOB_READS
{ … }
// u8 max pooling, multi-pass (9 + 8 per extra pass), 16 channels/iter.
void xnn_u8_maxpool_minmax_ukernel_9p8x__sse2_c16(
    size_t output_pixels,
    size_t kernel_elements,
    size_t channels,
    const uint8_t** input,
    size_t input_offset,
    uint8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union xnn_u8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// u8 max-reduction over the batch (per naming: writes a single max byte).
void xnn_u8_rmax_ukernel__sse2_u16(
    size_t batch,
    const uint8_t* input,
    uint8_t* output,
    const void* params)
{ … }
// u8 clamp to [min, max], 64 elems/iter.
void xnn_u8_vclamp_ukernel__sse2_u64(
    size_t batch,
    const uint8_t* input,
    uint8_t* output,
    const union xnn_u8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// 16-bit element transpose in 8x8 tiles.
void xnn_x16_transposec_ukernel__8x8_reuse_multi_sse2(
    const uint16_t* input,
    uint16_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x16_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// Pack 32-bit GEMM weights + bias into the GOI kernel layout (nr=2, kr=4).
void xnn_x32_packw_gemm_goi_ukernel_x2c4__sse2_u4(
    size_t g,
    size_t nc,
    size_t kc,
    size_t nr,
    size_t kr,
    size_t sr,
    const uint32_t* weights,
    const uint32_t* bias,
    const void* scale,
    uint32_t* packed_weights,
    size_t extra_bytes,
    const void* params)
{ … }
// As above, nr=8.
void xnn_x32_packw_gemm_goi_ukernel_x8__sse2_u4(
    size_t g,
    size_t nc,
    size_t kc,
    size_t nr,
    size_t kr,
    size_t sr,
    const uint32_t* weights,
    const uint32_t* bias,
    const void* scale,
    uint32_t* packed_weights,
    size_t extra_bytes,
    const void* params)
{ … }
// Max-unpooling of 32-bit values: scatter `input` to `output` per `index`,
// with `fill` for unused positions.
void xnn_x32_unpool_ukernel__sse2(
    size_t kernel_elements,
    size_t channels,
    uint32_t fill,
    const uint32_t* input,
    const uint32_t* index,
    uint32_t** output)
{ … }
// Interleave 2 streams of 32-bit values.
void xnn_x32_zip_x2_ukernel__sse2(
    size_t n,
    const uint32_t* input,
    uint32_t* output)
{ … }
// Interleave 3 streams of 32-bit values.
void xnn_x32_zip_x3_ukernel__sse2(
    size_t n,
    const uint32_t* input,
    uint32_t* output)
{ … }
// Interleave 4 streams of 32-bit values.
void xnn_x32_zip_x4_ukernel__sse2(
    size_t n,
    const uint32_t* input,
    uint32_t* output)
{ … }
// Interleave m streams of 32-bit values.
void xnn_x32_zip_xm_ukernel__sse2(
    size_t n,
    size_t m,
    const uint32_t* input,
    uint32_t* output)
{ … }
// 64-bit element transpose in 2x2 tiles.
void xnn_x64_transposec_ukernel__2x2_multi_mov_sse2(
    const uint64_t* input,
    uint64_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x64_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// 8-bit element transpose in 16x16 tiles.
void xnn_x8_transposec_ukernel__16x16_reuse_mov_sse2(
    const uint8_t* input,
    uint8_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height,
    const union xnn_x8_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{ … }
// Interleave 2 streams of bytes.
void xnn_x8_zip_x2_ukernel__sse2(
    size_t n,
    const uint8_t* input,
    uint8_t* output)
{ … }
// Interleave 3 streams of bytes.
void xnn_x8_zip_x3_ukernel__sse2(
    size_t n,
    const uint8_t* input,
    uint8_t* output)
{ … }
// Interleave 4 streams of bytes.
void xnn_x8_zip_x4_ukernel__sse2(
    size_t n,
    const uint8_t* input,
    uint8_t* output)
{ … }
// Interleave m streams of bytes.
void xnn_x8_zip_xm_ukernel__sse2(
    size_t n,
    size_t m,
    const uint8_t* input,
    uint8_t* output)
{ … }
// Fill `rows` rows of `channels` bytes with a repeated 4-byte pattern.
void xnn_xx_fill_ukernel__sse2_u64(
    size_t rows,
    size_t channels,
    void* output,
    size_t output_stride,
    const uint32_t fill_pattern)
{ … }
// Copy rows with pre/post padding filled with a repeated 4-byte pattern.
void xnn_xx_pad_ukernel_p16__sse2_u16(
    size_t rows,
    size_t channels,
    size_t pre_padding,
    size_t post_padding,
    const void* input,
    size_t input_stride,
    void* output,
    size_t output_stride,
    const uint32_t fill_pattern) XNN_OOB_READS
{ … }
// f32 elementwise absolute value, 8 elems/iter.
void xnn_f32_vabs_ukernel__sse2_u8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// f32 copysign: magnitude from `mag`, sign from `sign`, elementwise.
void xnn_f32_vcopysign_ukernel__sse2_u8(
    size_t batch,
    const float* mag,
    const float* sign,
    float* output,
    const union xnn_f32_default_params unused_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// copysign with broadcast operand ("c" suffix — per naming convention,
// `sign` supplies a single scalar).
void xnn_f32_vcopysignc_ukernel__sse2_u8(
    size_t batch,
    const float* mag,
    const float* sign,
    float* output,
    const union xnn_f32_default_params unused_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// f32 GELU via degree-12/10 rational approximation with division, 12/iter.
void xnn_f32_vgelu_ukernel__sse2_rational_12_10_div_u12(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params unused_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Helper for vlog below; per its name it extracts a signed exponent from a
// float vector — body elided, confirm in the full source.
static XNN_INLINE xnn_simd_f32_t xnn_signed_getexp_f32(xnn_simd_f32_t a) { … }
// f32 natural log via degree-3/3 rational approximation with division, 8/iter.
void xnn_f32_vlog_ukernel__sse2_rational_3_3_div_u8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params unused_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// f32 elementwise negation, 8 elems/iter.
void xnn_f32_vneg_ukernel__sse2_u8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// Reversed-operand broadcast copysign (note `sign` precedes `mag` here —
// per naming convention the broadcast operand is `mag`; confirm in source).
void xnn_f32_vrcopysignc_ukernel__sse2_u8(
    size_t batch,
    const float* sign,
    const float* mag,
    float* output,
    const union xnn_f32_default_params unused_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// f32 elementwise square (x*x), 8 elems/iter.
void xnn_f32_vsqr_ukernel__sse2_u8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{ … }
// f32 tanh via degree-9/6 rational approximation with division, 8 elems/iter.
void xnn_f32_vtanh_ukernel__sse2_rational_9_6_div_u8(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_tanh_params unused_params[restrict XNN_MIN_ELEMENTS(1)])
{ … }