// chromium/third_party/xnnpack/src/src/operators/convolution-nchw.c

// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <inttypes.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <fp16/fp16.h>
#include "xnnpack.h"
#include "xnnpack/allocator.h"
#include "xnnpack/cache.h"
#include "xnnpack/common.h"
#include "xnnpack/compute.h"
#include "xnnpack/config-types.h"
#include "xnnpack/config.h"
#include "xnnpack/log.h"
#include "xnnpack/math.h"
#include "xnnpack/microfnptr.h"
#include "xnnpack/microkernel-type.h"
#include "xnnpack/operator-type.h"
#include "xnnpack/operator-utils.h"
#include "xnnpack/operator.h"
#include "xnnpack/pack.h"
#include "xnnpack/params.h"
#include "pthreadpool.h"

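// Sets up the sparse matrix-dense matrix multiplication (SpMM) path of the
// NCHW convolution operator, used for 1x1 convolutions with a single group.
// The kernel is scanned for non-zero weights with xnn_analyze_spmm, packed
// into a compressed representation with xnn_pack_spmm, and one of the
// spmm/spmm2/spmm4 microkernel configurations is chosen depending on the
// block structure of the non-zero weights.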
static enum xnn_status create_spmm_path(
    const uint32_t kernel_height,
    const uint32_t kernel_width,
    const uint32_t groups,
    const size_t group_input_channels,
    const size_t group_output_channels,
    const void* kernel,
    const void* bias,
    const uint32_t log2_filter_element_size,
    const xnn_analyze_spmm_w_fn xnn_analyze_spmm,
    const xnn_pack_spmm_w_fn xnn_pack_spmm,
    const struct xnn_spmm_config* spmm_config,
    const struct xnn_spmm_config* spmm2_config,
    const struct xnn_spmm_config* spmm4_config,
    const enum xnn_operator_type operator_type,
    const xnn_operator_t convolution_op)
{}

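// Sets up the direct-convolution path that reads HWC (channels-last) input
// and writes CHW (channels-first) output, used for the dense stem convolution
// of an otherwise NCHW model (e.g. a 3x3 stride-2 convolution over a
// 3-channel image). Weights are packed with xnn_pack_dconv_oki_w, and the
// conv_hwc2chw microkernel is recorded together with its output height and
// output channel tile sizes.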
static enum xnn_status create_conv2d_hwc2chw_path(
    const uint32_t kernel_height,
    const uint32_t kernel_width,
    const uint32_t groups,
    const size_t group_input_channels,
    const size_t group_output_channels,
    const size_t output_height_tile,
    const size_t output_channel_tile,
    const void* kernel,
    const void* bias,
    const uint32_t log2_filter_element_size,
    const xnn_pack_dconv_oki_w_fn xnn_pack_dconv_oki_w,
    const xnn_conv_hwc2chw_ukernel_fn conv_hwc2chw_ukernel,
    const enum xnn_operator_type operator_type,
    const xnn_operator_t convolution_op)
{}

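// Sets up the depthwise (channel-wise) convolution path. The kernel is packed
// with either the HWG or the GHW packing function, depending on the kernel
// layout indicated by the flags (XNN_FLAG_DEPTHWISE_CONVOLUTION selects HWG),
// and the dwconv2d_chw microkernel is recorded together with its output width
// tile and its CHW parameter update callback.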
static enum xnn_status create_dwconv_path(
    const uint32_t kernel_height,
    const uint32_t kernel_width,
    const uint32_t groups,
    const void* kernel,
    const void* bias,
    const uint32_t flags,
    const uint32_t log2_filter_element_size,
    const xnn_pack_chw_dwconv_hwg_w_fn pack_chw_dwconv_hwg_w,
    const xnn_pack_chw_dwconv_ghw_w_fn pack_chw_dwconv_ghw_w,
    const xnn_update_chw_params_fn update_chw_params,
    const size_t output_width_tile,
    const xnn_dwconv2d_chw_ukernel_fn dwconv_ukernel,
    const enum xnn_operator_type operator_type,
    const xnn_operator_t convolution_op)
{}

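// Creates a 2D convolution operator with F16 weights and activations for
// NCHW (channels-first) input and output. Depending on the convolution
// parameters, the operator is backed by one of the paths above: SpMM for
// 1x1 convolutions, HWC->CHW direct convolution for the dense stem
// convolution, or CHW depthwise convolution; other configurations are
// rejected as unsupported in this layout.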
enum xnn_status xnn_create_convolution2d_nchw_f16(
    uint32_t input_padding_top,
    uint32_t input_padding_right,
    uint32_t input_padding_bottom,
    uint32_t input_padding_left,
    uint32_t kernel_height,
    uint32_t kernel_width,
    uint32_t subsampling_height,
    uint32_t subsampling_width,
    uint32_t dilation_height,
    uint32_t dilation_width,
    uint32_t groups,
    size_t group_input_channels,
    size_t group_output_channels,
    size_t input_channel_stride,
    size_t output_channel_stride,
    const void* kernel,
    const void* bias,
    float output_min,
    float output_max,
    uint32_t flags,
    xnn_code_cache_t code_cache,
    xnn_weights_cache_t weights_cache,
    xnn_operator_t* convolution_op_out)
{}


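// Creates a 2D convolution operator with F32 weights and activations for
// NCHW (channels-first) input and output. Like the F16 variant above, it
// supports 1x1 convolutions via SpMM (which benefits from sparse kernels),
// the dense HWC->CHW stem convolution, and CHW depthwise convolutions.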
enum xnn_status xnn_create_convolution2d_nchw_f32(
    uint32_t input_padding_top,
    uint32_t input_padding_right,
    uint32_t input_padding_bottom,
    uint32_t input_padding_left,
    uint32_t kernel_height,
    uint32_t kernel_width,
    uint32_t subsampling_height,
    uint32_t subsampling_width,
    uint32_t dilation_height,
    uint32_t dilation_width,
    uint32_t groups,
    size_t group_input_channels,
    size_t group_output_channels,
    size_t input_channel_stride,
    size_t output_channel_stride,
    const float* kernel,
    const float* bias,
    float output_min,
    float output_max,
    uint32_t flags,
    xnn_code_cache_t code_cache,
    xnn_weights_cache_t weights_cache,
    xnn_operator_t* convolution_op_out)
{}

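// Shared reshape logic for the F16 and F32 NCHW convolution operators.
// Validates the operator type and input dimensions, computes the output size
// in each spatial dimension as
//   output = (padded_input - (kernel - 1) * dilation - 1) / subsampling + 1
// fills in the compute/context structures for the path selected at creation
// time, and optionally reports the output dimensions through
// output_height_out/output_width_out.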
static enum xnn_status reshape_convolution2d_nchw(
  xnn_operator_t convolution_op,
  enum xnn_operator_type expected_operator_type,
  size_t batch_size,
  size_t input_height,
  size_t input_width,
  uint32_t log2_input_element_size,
  uint32_t log2_filter_element_size,
  uint32_t bias_element_size,
  uint32_t log2_output_element_size,
  const void* params,
  void* chw_params,
  size_t* output_height_out,
  size_t* output_width_out,
  pthreadpool_t threadpool)
{}

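// Reshapes an F16 NCHW convolution operator for the given batch size and
// input height/width, reporting the resulting output dimensions.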
enum xnn_status xnn_reshape_convolution2d_nchw_f16(
    xnn_operator_t convolution_op,
    size_t batch_size,
    size_t input_height,
    size_t input_width,
    size_t* output_height_out,
    size_t* output_width_out,
    pthreadpool_t threadpool)
{}

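// Reshapes an F32 NCHW convolution operator for the given batch size and
// input height/width, reporting the resulting output dimensions.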
enum xnn_status xnn_reshape_convolution2d_nchw_f32(
    xnn_operator_t convolution_op,
    size_t batch_size,
    size_t input_height,
    size_t input_width,
    size_t* output_height_out,
    size_t* output_width_out,
    pthreadpool_t threadpool)
{}

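// Shared setup logic for the F16 and F32 NCHW convolution operators: binds
// the input and output pointers to the state prepared by reshape.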
static enum xnn_status setup_convolution2d_nchw(
  xnn_operator_t convolution_op,
  enum xnn_operator_type expected_operator_type,
  const void* input,
  void* output)
{}

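// Binds F16 input and output tensors to a previously reshaped F16 NCHW
// convolution operator.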
enum xnn_status xnn_setup_convolution2d_nchw_f16(
    xnn_operator_t convolution_op,
    const void* input,
    void* output)
{}

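// Binds F32 input and output tensors to a previously reshaped F32 NCHW
// convolution operator.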
enum xnn_status xnn_setup_convolution2d_nchw_f32(
    xnn_operator_t convolution_op,
    const float* input,
    float* output)
{}
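
// Illustrative usage sketch (not part of the library): how a caller typically
// drives the F32 NCHW convolution operator defined in this file, following
// the create -> reshape -> setup -> run -> delete sequence. The shapes,
// pointers, and the helper name run_pointwise_convolution_example below are
// hypothetical; the xnn_* calls are existing XNNPACK APIs. A 1x1 (pointwise)
// convolution is used because it maps onto the SpMM path.
static enum xnn_status run_pointwise_convolution_example(
    const float* kernel,  // [output_channels * input_channels] 1x1 weights
    const float* bias,    // [output_channels], may be NULL
    const float* input,   // NCHW: [batch * input_channels * height * width]
    float* output,        // NCHW: [batch * output_channels * height * width]
    size_t batch_size, size_t input_channels, size_t output_channels,
    size_t height, size_t width,
    pthreadpool_t threadpool)
{
  // Initialize XNNPACK with the default allocator.
  enum xnn_status status = xnn_initialize(/*allocator=*/NULL);
  if (status != xnn_status_success) {
    return status;
  }

  // Create a 1x1, stride-1, ungrouped convolution without output clamping.
  xnn_operator_t convolution_op = NULL;
  status = xnn_create_convolution2d_nchw_f32(
      /*input_padding_top=*/0, /*input_padding_right=*/0,
      /*input_padding_bottom=*/0, /*input_padding_left=*/0,
      /*kernel_height=*/1, /*kernel_width=*/1,
      /*subsampling_height=*/1, /*subsampling_width=*/1,
      /*dilation_height=*/1, /*dilation_width=*/1,
      /*groups=*/1,
      /*group_input_channels=*/input_channels,
      /*group_output_channels=*/output_channels,
      /*input_channel_stride=*/input_channels,
      /*output_channel_stride=*/output_channels,
      kernel, bias,
      /*output_min=*/-INFINITY, /*output_max=*/INFINITY,
      /*flags=*/0,
      /*code_cache=*/NULL, /*weights_cache=*/NULL,
      &convolution_op);
  if (status != xnn_status_success) {
    return status;
  }

  // Reshape for the input dimensions; for a 1x1 stride-1 convolution the
  // reported output dimensions equal the input dimensions.
  size_t output_height = 0;
  size_t output_width = 0;
  status = xnn_reshape_convolution2d_nchw_f32(
      convolution_op, batch_size, height, width,
      &output_height, &output_width, threadpool);
  if (status != xnn_status_success) {
    goto cleanup;
  }

  // Bind the input/output buffers and run the operator.
  status = xnn_setup_convolution2d_nchw_f32(convolution_op, input, output);
  if (status != xnn_status_success) {
    goto cleanup;
  }
  status = xnn_run_operator(convolution_op, threadpool);

cleanup:
  xnn_delete_operator(convolution_op);
  return status;
}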