/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_EN_TXRX_H___
#define __MLX5_EN_TXRX_H___

#include "en.h"
#include <linux/indirect_call_wrapper.h>
#include <net/ip6_checksum.h>
#include <net/tcp.h>

#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)

#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))

/* IPSEC inline data includes:
 * 1. ESP trailer: up to 255 bytes of padding, 1 byte for pad length, 1 byte for
 *    next header.
 * 2. ESP authentication data: 16 bytes for ICV.
 */
#define MLX5E_MAX_TX_IPSEC_DS DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + \
					   255 + 1 + 1 + 16, MLX5_SEND_WQE_DS)
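
/* Worked example (illustrative, assuming 16-byte DS units and a 4-byte
 * inline segment header): DIV_ROUND_UP(4 + 255 + 1 + 1 + 16, 16) = 18 DS.
 */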

/* 366 should be big enough to cover all L2, L3 and L4 headers with possible
 * encapsulations.
 */
#define MLX5E_MAX_TX_INLINE_DS DIV_ROUND_UP(366 - INL_HDR_START_SZ + VLAN_HLEN, \
					    MLX5_SEND_WQE_DS)
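
/* Worked example (illustrative, assuming a 2-byte inline_hdr.start field,
 * VLAN_HLEN == 4 and 16-byte DS units): DIV_ROUND_UP(368, 16) = 23 DS.
 */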

/* Sync the calculation with mlx5e_sq_calc_wqe_attr. */
#define MLX5E_MAX_TX_WQEBBS (DIV_ROUND_UP(MLX5E_TX_WQE_EMPTY_DS_COUNT + \
					  MLX5E_MAX_TX_INLINE_DS + \
					  MLX5E_MAX_TX_IPSEC_DS + \
					  MAX_SKB_FRAGS + 1, \
					  MLX5_SEND_WQEBB_NUM_DS))
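
/* Worked example (illustrative; the exact value depends on the kernel
 * config): assuming a 32-byte empty TX WQE (2 DS), 23 inline DS, 18 IPSEC
 * DS, the default MAX_SKB_FRAGS of 17 and 4 DS per WQEBB, this is
 * DIV_ROUND_UP(2 + 23 + 18 + 17 + 1, 4) = 16 WQEBBs.
 */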

#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
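
/* Any RX completion whose opcode differs from MLX5_CQE_RESP_SEND (a regular
 * receive completion) is treated as an error CQE.
 */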

#define MLX5E_KSM_UMR_WQE_SZ(sgl_len)\
	(sizeof(struct mlx5e_umr_wqe) +\
	(sizeof(struct mlx5_ksm) * (sgl_len)))

#define MLX5E_KSM_UMR_WQEBBS(ksm_entries) \
	(DIV_ROUND_UP(MLX5E_KSM_UMR_WQE_SZ(ksm_entries), MLX5_SEND_WQE_BB))

#define MLX5E_KSM_UMR_DS_CNT(ksm_entries)\
	(DIV_ROUND_UP(MLX5E_KSM_UMR_WQE_SZ(ksm_entries), MLX5_SEND_WQE_DS))

#define MLX5E_KSM_MAX_ENTRIES_PER_WQE(wqe_size)\
	(((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_ksm))

#define MLX5E_KSM_ENTRIES_PER_WQE(wqe_size)\
	ALIGN_DOWN(MLX5E_KSM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT)

#define MLX5E_MAX_KSM_PER_WQE(mdev) \
	MLX5E_KSM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))
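
/* Worked example (illustrative; actual struct sizes may differ): assuming
 * 64-byte WQEBBs, 16-byte KSM entries and a 128-byte static part
 * (struct mlx5e_umr_wqe), a 4-WQEBB (256-byte) UMR WQE has room for
 * (256 - 128) / 16 = 8 KSM entries before the alignment is applied.
 */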

static inline
ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_ts)
{
	return INDIRECT_CALL_2(func, mlx5_real_time_cyc2time, mlx5_timecounter_cyc2time,
			       clock, cqe_ts);
}
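
/* Typical call site (illustrative sketch; assumes the caller keeps the
 * cyc2time callback and the clock at hand, as the datapath structs do):
 *
 *	ktime_t hwts = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time, rq->clock,
 *					  get_cqe_ts(cqe));
 */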

enum mlx5e_icosq_wqe_type {
	MLX5E_ICOSQ_WQE_NOP,
	MLX5E_ICOSQ_WQE_UMR_RX,
	MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
#ifdef CONFIG_MLX5_EN_TLS
	MLX5E_ICOSQ_WQE_UMR_TLS,
	MLX5E_ICOSQ_WQE_SET_PSV_TLS,
	MLX5E_ICOSQ_WQE_GET_PSV_TLS,
#endif
};

/* General */
static inline bool mlx5e_skb_is_multicast(struct sk_buff *skb)
{
	return skb->pkt_type == PACKET_MULTICAST || skb->pkt_type == PACKET_BROADCAST;
}

void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);

/* RX */
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq);

static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
	return config->rx_filter == HWTSTAMP_FILTER_ALL;
}

/* TX */
struct mlx5e_xmit_data {
	dma_addr_t dma_addr;
	void *data;
	u32 len : 31;
	u32 has_frags : 1;
};

struct mlx5e_xmit_data_frags {
	struct mlx5e_xmit_data xd;
	struct skb_shared_info *sinfo;
	dma_addr_t *dma_arr;
};

netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);

static inline bool
mlx5e_skb_fifo_has_room(struct mlx5e_skb_fifo *fifo)
{
	return (u16)(*fifo->pc - *fifo->cc) < fifo->mask;
}

/* pc and cc are free-running u16 producer/consumer counters. There is room
 * for n WQEBBs when the distance from cc to pc, taken modulo the ring size,
 * is at least n; cc == pc means the ring is empty.
 */
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}

static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size)
{
	void *wqe;

	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	memset(wqe, 0, wqe_size);

	return wqe;
}

#define MLX5E_TX_FETCH_WQE(sq, pi) \
	((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))

static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16                         pi   = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe        *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg   *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}

static inline struct mlx5e_tx_wqe *
mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16                         pi   = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe        *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg   *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);
	cseg->fm_ce_se         = MLX5_FENCE_MODE_INITIATOR_SMALL;

	(*pc)++;

	return wqe;
}

struct mlx5e_tx_wqe_info {
	struct sk_buff *skb;
	u32 num_bytes;
	u8 num_wqebbs;
	u8 num_dma;
	u8 num_fifo_pkts;
#ifdef CONFIG_MLX5_EN_TLS
	struct page *resync_dump_frag_page;
#endif
};

static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_tx_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_tx_wqe_info) {
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}
		sq->stats->nop += contig_wqebbs;

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq);

static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
}
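
/* The masking above assumes hd_per_wq is a power of two. */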

struct mlx5e_shampo_umr {
	u16 len;
};

struct mlx5e_icosq_wqe_info {
	u8 wqe_type;
	u8 num_wqebbs;

	/* Auxiliary data for different wqe types. */
	union {
		struct {
			struct mlx5e_rq *rq;
		} umr;
		struct mlx5e_shampo_umr shampo;
#ifdef CONFIG_MLX5_EN_TLS
		struct {
			struct mlx5e_ktls_offload_context_rx *priv_rx;
		} tls_set_params;
		struct {
			struct mlx5e_ktls_rx_resync_buf *buf;
		} tls_get_params;
#endif
	};
};

void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq);

static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_icosq_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_icosq_wqe_info) {
				.wqe_type   = MLX5E_ICOSQ_WQE_NOP,
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
		struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map);
}
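
/* Typical TX flow (illustrative sketch, not the complete datapath):
 *
 *	u16 pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
 *	struct mlx5e_tx_wqe *wqe = MLX5E_TX_FETCH_WQE(sq, pi);
 *
 * The caller fills the ctrl/eth/data segments, advances the producer
 * counter (sq->pc += num_wqebbs) and then rings the doorbell:
 *
 *	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
 */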

static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

static inline struct mlx5e_sq_dma *
mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

static inline void
mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
	       enum mlx5e_dma_map_type map_type)
{
	struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);

	dma->addr = addr;
	dma->size = size;
	dma->type = map_type;
}

static inline
struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_skb_fifo *fifo, u16 i)
{
	return &fifo->fifo[i & fifo->mask];
}

static inline
void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo, struct sk_buff *skb)
{
	struct sk_buff **skb_item = mlx5e_skb_fifo_get(fifo, (*fifo->pc)++);

	*skb_item = skb;
}

static inline
struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo)
{
	WARN_ON_ONCE(*fifo->pc == *fifo->cc);

	return *mlx5e_skb_fifo_get(fifo, (*fifo->cc)++);
}

static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);

static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
{
	return session->ds_count == max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
}
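
/* E.g. (illustrative values) with 64-byte WQEBBs (4 DS each) and
 * max_sq_mpw_wqebbs == 5, a session is full once 20 DS have been posted.
 */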

static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
{
	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		mlx5_wq_ll_reset(&rq->mpwqe.wq);
		rq->mpwqe.actual_wq_head = 0;
	} else {
		mlx5_wq_cyc_reset(&rq->wqe.wq);
	}
}

static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn,
					struct mlx5_err_cqe *err_cqe)
{
	struct mlx5_cqwq *wq = &cq->wq;
	u32 ci;

	ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);

	netdev_err(cq->netdev,
		   "Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
		   cq->mcq.cqn, ci, qn,
		   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
		   err_cqe->syndrome, err_cqe->vendor_err_synd);
	mlx5_dump_err_cqe(cq->mdev, err_cqe);
}

static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_size(&rq->wqe.wq);
	}
}

static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return rq->mpwqe.wq.cur_sz;
	default:
		return rq->wqe.wq.cur_sz;
	}
}

static inline u16 mlx5e_rqwq_get_head(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_head(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_head(&rq->wqe.wq);
	}
}

static inline u16 mlx5e_rqwq_get_wqe_counter(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_counter(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_counter(&rq->wqe.wq);
	}
}

/* SW parser related functions */

struct mlx5e_swp_spec {
	__be16 l3_proto;
	u8 l4_proto;
	u8 is_tun;
	__be16 tun_l3_proto;
	u8 tun_l4_proto;
};

static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
{
	/* SWP offsets are in 2-bytes words */
	eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
	eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
	eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
	eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
}

static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
		   struct mlx5e_swp_spec *swp_spec)
{
	/* SWP offsets are in 2-bytes words */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (swp_spec->l3_proto == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
	if (swp_spec->l4_proto) {
		eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
		if (swp_spec->l4_proto == IPPROTO_UDP)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
	}

	if (swp_spec->is_tun) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		if (swp_spec->l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	}

	switch (swp_spec->tun_l4_proto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		fallthrough;
	case IPPROTO_TCP:
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		break;
	}
}

static inline void
mlx5e_swp_encap_csum_partial(struct mlx5_core_dev *mdev, struct sk_buff *skb, bool tunnel)
{
	const struct iphdr *ip = tunnel ? inner_ip_hdr(skb) : ip_hdr(skb);
	const struct ipv6hdr *ip6;
	struct tcphdr *th;
	struct udphdr *uh;
	int len;

	if (!MLX5_CAP_ETH(mdev, swp_csum_l4_partial) || !skb_is_gso(skb))
		return;

	if (skb_is_gso_tcp(skb)) {
		th = inner_tcp_hdr(skb);
		len = skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb);

		if (ip->version == 4) {
			th->check = ~tcp_v4_check(len, ip->saddr, ip->daddr, 0);
		} else {
			ip6 = tunnel ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
			th->check = ~tcp_v6_check(len, &ip6->saddr, &ip6->daddr, 0);
		}
	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		uh = (struct udphdr *)skb_inner_transport_header(skb);
		len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);

		if (ip->version == 4) {
			uh->check = ~udp_v4_check(len, ip->saddr, ip->daddr, 0);
		} else {
			ip6 = tunnel ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
			uh->check = ~udp_v6_check(len, &ip6->saddr, &ip6->daddr, 0);
		}
	}
}

#define MLX5E_STOP_ROOM(wqebbs) ((wqebbs) * 2 - 1)

static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
{
	WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < (u16)mlx5e_get_max_sq_wqebbs(mdev));

	/* A WQE must not cross the page boundary, hence two conditions:
	 * 1. Its size must not exceed the page size.
	 * 2. If the WQE size is X, and the space remaining in a page is less
	 *    than X, this space needs to be padded with NOPs. So, one WQE of
	 *    size X may require up to X - 1 WQEBBs of padding, which makes the
	 *    stop room of X - 1 + X.
	 * WQE size is also limited by the hardware limit.
	 */
	WARN_ON_ONCE(wqe_size > mlx5e_get_max_sq_wqebbs(mdev));

	return MLX5E_STOP_ROOM(wqe_size);
}
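
/* E.g. the stop room for a 16-WQEBB WQE is 15 WQEBBs of worst-case NOP
 * padding at the page edge plus the 16 WQEBBs of the WQE itself, i.e.
 * MLX5E_STOP_ROOM(16) = 31.
 */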

static inline u16 mlx5e_stop_room_for_max_wqe(struct mlx5_core_dev *mdev)
{
	return MLX5E_STOP_ROOM(mlx5e_get_max_sq_wqebbs(mdev));
}

static inline u16 mlx5e_stop_room_for_mpwqe(struct mlx5_core_dev *mdev)
{
	u8 mpwqe_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);

	return mlx5e_stop_room_for_wqe(mdev, mpwqe_wqebbs);
}

static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
{
	u16 room = sq->reserved_room + MLX5E_STOP_ROOM(wqe_size);

	return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
}

static inline struct mlx5e_mpw_info *mlx5e_get_mpw_info(struct mlx5e_rq *rq, int i)
{
	size_t isz = struct_size(rq->mpwqe.info, alloc_units.frag_pages, rq->mpwqe.pages_per_wqe);

	return (struct mlx5e_mpw_info *)((char *)rq->mpwqe.info + array_size(i, isz));
}
#endif