linux/drivers/net/ethernet/mellanox/mlx5/core/en/params.c

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "en/params.h"
#include "en/txrx.h"
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec.h"
#include <linux/dim.h>
#include <net/page_pool/types.h>
#include <net/xdp_sock_drv.h>

static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
{}

u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
{}

enum mlx5e_mpwrq_umr_mode
mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
{}

u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode)
{}
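
/*
 * Illustrative sketch only (hypothetical helper name, not the upstream body):
 * the UMR entry size is a per-mode constant, i.e. the size of the translation
 * entry written for each page. The enum values and entry structs below are
 * assumptions based on en/params.h and the mlx5 UMR definitions.
 */
static inline u8 mlx5e_mpwrq_umr_entry_size_sketch(enum mlx5e_mpwrq_umr_mode mode)
{
	switch (mode) {
	case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
		return sizeof(struct mlx5_mtt);		/* aligned pages use MTTs */
	case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
		return sizeof(struct mlx5_ksm);		/* unaligned XSK chunks use KSMs */
	case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
		return sizeof(struct mlx5_klm) * 2;	/* oversized strides use KLM pairs */
	case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
		return sizeof(struct mlx5_ksm) * 4;	/* 3 KSMs, padded to a power of two */
	}
	WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode);
	return 0;
}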

u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
			  enum mlx5e_mpwrq_umr_mode umr_mode)
{}

u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
			     enum mlx5e_mpwrq_umr_mode umr_mode)
{}

u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
			   enum mlx5e_mpwrq_umr_mode umr_mode)
{}

u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift,
			  enum mlx5e_mpwrq_umr_mode umr_mode)
{}

u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
			    enum mlx5e_mpwrq_umr_mode umr_mode)
{}

u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev,
				enum mlx5e_mpwrq_umr_mode umr_mode)
{}

static u8 mlx5e_mpwrq_max_log_rq_size(struct mlx5_core_dev *mdev, u8 page_shift,
				      enum mlx5e_mpwrq_umr_mode umr_mode)
{}

u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
			       enum mlx5e_mpwrq_umr_mode umr_mode)
{}

u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
				 struct mlx5e_xsk_param *xsk)
{}
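
/*
 * Hedged sketch (hypothetical name, not the upstream body) of how the linear
 * RX headroom is chosen: an XSK pool dictates its own headroom, an attached
 * XDP program needs XDP_PACKET_HEADROOM, otherwise the regular SKB headroom
 * is used; NET_IP_ALIGN is added in the non-XSK cases. MLX5_RX_HEADROOM here
 * stands for the driver's default SKB headroom and is an assumption.
 */
static inline u16 mlx5e_linear_rq_headroom_sketch(struct mlx5e_params *params,
						  struct mlx5e_xsk_param *xsk)
{
	u16 headroom;

	if (xsk)
		return xsk->headroom;

	headroom = NET_IP_ALIGN;
	if (params->xdp_prog)
		headroom += XDP_PACKET_HEADROOM;
	else
		headroom += MLX5_RX_HEADROOM;

	return headroom;
}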

static u32 mlx5e_rx_get_linear_sz_xsk(struct mlx5e_params *params,
				      struct mlx5e_xsk_param *xsk)
{}

static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool no_head_tail_room)
{}

static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
					 struct mlx5e_params *params,
					 struct mlx5e_xsk_param *xsk,
					 bool mpwqe)
{}

static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5_core_dev *mdev,
				       struct mlx5e_params *params,
				       struct mlx5e_xsk_param *xsk)
{}

bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
{}

static bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
					  u8 log_stride_sz, u8 log_num_strides,
					  u8 page_shift,
					  enum mlx5e_mpwrq_umr_mode umr_mode)
{}

bool mlx5e_verify_params_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
					  struct mlx5e_params *params,
					  struct mlx5e_xsk_param *xsk)
{}

bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
				  struct mlx5e_params *params,
				  struct mlx5e_xsk_param *xsk)
{}

u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params,
			       struct mlx5e_xsk_param *xsk)
{}

u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params)
{}

u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
				  struct mlx5e_params *params)
{}

u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params)
{}

u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{}

u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{}

u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)
{}
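
/*
 * Hedged sketch: the minimum UMR WQE bulk is a small constant, capped so that
 * one bulk never covers half or more of the work queue. The constant and the
 * helper name are assumptions, not necessarily the upstream values.
 */
static inline u8 mlx5e_min_wqe_bulk_sketch(unsigned int wq_sz)
{
#define MLX5E_MIN_WQE_BULK_SKETCH 2
	return min_t(unsigned int, MLX5E_MIN_WQE_BULK_SKETCH, wq_sz / 2 - 1);
}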

u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_xsk_param *xsk)
{}

u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{}

int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{}

bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{}

int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{}

int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk)
{}

void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params)
{}

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{}

void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params)
{}

/* Build queue parameters */

void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
{}

static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size, bool xdp)
{}

static void mlx5e_rx_compute_wqe_bulk_params(struct mlx5e_params *params,
					     struct mlx5e_rq_frags_info *info)
{}

#define DEFAULT_FRAG_SIZE (2048)

static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params,
				     struct mlx5e_xsk_param *xsk,
				     struct mlx5e_rq_frags_info *info,
				     u32 *xdp_frag_size)
{}

static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
{}
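
/*
 * Hedged sketch (hypothetical name): the RQ WQE log stride is the log2,
 * rounded up, of one RX WQE, i.e. the fixed WQE header for the given WQ type
 * plus one data segment per fragment. The mlx5e_rx_wqe_* struct names are
 * assumptions based on en.h.
 */
static inline u8 mlx5e_rqwq_log_stride_sketch(u8 wq_type, int ndsegs)
{
	int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;

	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		sz += sizeof(struct mlx5e_rx_wqe_ll);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		sz += sizeof(struct mlx5e_rx_wqe_cyc);
	}

	return order_base_2(sz);
}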

static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
					struct mlx5e_cq_param *param)
{}

static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
					struct mlx5e_params *params,
					struct mlx5e_xsk_param *xsk)
{}

static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
				    struct mlx5e_params *params,
				    struct mlx5e_xsk_param *xsk,
				    struct mlx5e_cq_param *param)
{}

static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{}

int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
			 struct mlx5e_params *params,
			 struct mlx5e_xsk_param *xsk,
			 struct mlx5e_rq_param *param)
{}

void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
			       struct mlx5e_rq_param *param)
{}

void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_cq_param *param)
{}

void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_sq_param *param)
{}

void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_sq_param *param)
{}

static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
				     u8 log_wq_size,
				     struct mlx5e_cq_param *param)
{}

/* This function calculates the maximum number of header entries that are
 * needed per WQE. The formula is based on the size of the reservations and
 * on the restriction that the maximum number of packets per reservation is
 * equal to the maximum number of headers per reservation.
 */
u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    struct mlx5e_rq_param *rq_param)
{}
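
/*
 * Worked sketch of the formula described above (hypothetical helper, inputs
 * assumed to be already derived from the RQ parameters): with a WQE of
 * wqe_sz bytes split into reservations of resv_sz bytes and pkt_per_resv
 * packets admitted per reservation, the WQE needs
 * (wqe_sz / resv_sz) * pkt_per_resv header entries.
 */
static inline u32 mlx5e_shampo_hd_per_wqe_sketch(u32 wqe_sz, u32 resv_sz,
						 u32 pkt_per_resv)
{
	return (wqe_sz / resv_sz) * pkt_per_resv;
}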

/* This function calculates the maximum number of header entries that are
 * needed for the WQ. This value is used to allocate the header buffer in HW,
 * so it must be a power of two.
 */
u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params,
			   struct mlx5e_rq_param *rq_param)
{}
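
/*
 * Worked sketch of the sizing described above (hypothetical helper): the
 * per-WQ header count is the per-WQE count scaled by the number of WQEs in
 * the RQ and rounded up to a power of two, since the HW header buffer must
 * hold a power-of-two number of entries.
 */
static inline u32 mlx5e_shampo_hd_per_wq_sketch(u32 hd_per_wqe, u32 wq_size)
{
	return roundup_pow_of_two(hd_per_wqe * wq_size);
}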

static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
				 struct mlx5e_params *params,
				 struct mlx5e_rq_param *rq_param)
{}

static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
					struct mlx5e_params *params,
					struct mlx5e_xsk_param *xsk)
{}

static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params,
				      struct mlx5e_rq_param *rqp)
{}

static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
{}

static void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
				    u8 log_wq_size,
				    struct mlx5e_sq_param *param)
{}

static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
					  u8 log_wq_size,
					  struct mlx5e_sq_param *param)
{}

void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk,
			     struct mlx5e_sq_param *param)
{}

int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
			      struct mlx5e_params *params,
			      struct mlx5e_channel_param *cparam)
{}