#include "mlx5hws_internal.h"
#include "lib/clock.h"
/* File-scope integer constants for the send engine (values elided in this
 * view of the file — cannot document individual constants here).
 */
enum { … };
/* Reserve the next dependent-WQE slot on @queue and return a pointer to it.
 * NOTE(review): body elided in this view — presumably paired with
 * mlx5hws_send_abort_new_dep_wqe() (rollback) and mlx5hws_send_all_dep_wqe()
 * (flush); confirm against full source.
 */
struct mlx5hws_send_ring_dep_wqe *
mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue)
{ … }
/* Undo the most recent mlx5hws_send_add_new_dep_wqe() reservation on @queue.
 * Body elided in this view — exact rollback semantics TODO confirm.
 */
void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue)
{ … }
/* Flush all accumulated dependent WQEs on @queue to the send ring.
 * Body elided in this view — ordering guarantees TODO confirm.
 */
void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue)
{ … }
/* Begin a WQE-posting session on @queue; the returned control context is
 * consumed by mlx5hws_send_engine_post_req_wqe() and finalized by
 * mlx5hws_send_engine_post_end(). Body elided in this view.
 */
struct mlx5hws_send_engine_post_ctrl
mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue)
{ … }
/* Claim the next WQE within an open posting session @ctrl, returning its
 * buffer and size through @buf/@len (out parameters). Body elided in this
 * view — whether multiple WQEs per session are allowed, TODO confirm.
 */
void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl,
				      char **buf, size_t *len)
{ … }
/* Ring the SQ doorbell for @sq using control segment @doorbell_cseg so HW
 * starts processing newly posted WQEs. Body elided in this view — memory
 * barrier placement not visible; confirm against full source.
 */
static void hws_send_engine_post_ring(struct mlx5hws_send_ring_sq *sq,
				      struct mlx5hws_wqe_ctrl_seg *doorbell_cseg)
{ … }
/* Copy rule match @tag into the GTA data segment @wqe_data; @is_jumbo
 * presumably selects between jumbo and regular tag layouts — body elided
 * in this view, TODO confirm.
 */
static void
hws_send_wqe_set_tag(struct mlx5hws_wqe_gta_data_seg_ste *wqe_data,
		     struct mlx5hws_rule_match_tag *tag,
		     bool is_jumbo)
{ … }
/* Finish the posting session @ctrl: apply post attributes @attr and submit
 * the WQE(s) to hardware. Body elided in this view.
 */
void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl,
				  struct mlx5hws_send_engine_post_attr *attr)
{ … }
/* Build and post a single GTA WQE on @queue from the given control segment,
 * optional data segment and tag; @gta_opcode selects the GTA operation and
 * @direct_index presumably addresses a specific STE. Body elided in this
 * view — TODO confirm @direct_index semantics.
 */
static void hws_send_wqe(struct mlx5hws_send_engine *queue,
			 struct mlx5hws_send_engine_post_attr *send_attr,
			 struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl,
			 void *send_wqe_data,
			 void *send_wqe_tag,
			 bool is_jumbo,
			 u8 gta_opcode,
			 u32 direct_index)
{ … }
/* Post the STE write(s) described by @ste_attr on @queue. Body elided in
 * this view — likely dispatches to hws_send_wqe(); confirm against full
 * source.
 */
void mlx5hws_send_ste(struct mlx5hws_send_engine *queue,
		      struct mlx5hws_send_ste_attr *ste_attr)
{ … }
/* Re-post the send described by ring entry @priv at WQE index @wqe_cnt,
 * presumably after a completion indicated a retryable failure. Body elided
 * in this view — TODO confirm retry conditions.
 */
static void hws_send_engine_retry_post_send(struct mlx5hws_send_engine *queue,
					    struct mlx5hws_send_ring_priv *priv,
					    u16 wqe_cnt)
{ … }
/* Flush any pending work on @queue out to hardware. Body elided in this
 * view — blocking behavior not visible; confirm against full source.
 */
void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue)
{ … }
/* Completion handling for a rule-resize operation tracked by @priv; writes
 * the resulting operation status through @status (out parameter). Body
 * elided in this view.
 */
static void
hws_send_engine_update_rule_resize(struct mlx5hws_send_engine *queue,
				   struct mlx5hws_send_ring_priv *priv,
				   enum mlx5hws_flow_op_status *status)
{ … }
/* Update rule state on completion of the WQE at index @wqe_cnt tracked by
 * @priv; reports the operation status through @status (out parameter).
 * Body elided in this view.
 */
static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue,
					struct mlx5hws_send_ring_priv *priv,
					u16 wqe_cnt,
					enum mlx5hws_flow_op_status *status)
{ … }
/* Process one completion (@cqe may describe an error — not visible here)
 * for ring entry @priv: update rule state and, when user-visible, append a
 * result to @res at position *@i (bounded by @res_nb), advancing *@i.
 * Body elided in this view — exact conditions for reporting TODO confirm.
 */
static void hws_send_engine_update(struct mlx5hws_send_engine *queue,
				   struct mlx5_cqe64 *cqe,
				   struct mlx5hws_send_ring_priv *priv,
				   struct mlx5hws_flow_op_result res[],
				   s64 *i,
				   u32 res_nb,
				   u16 wqe_cnt)
{ … }
/* Validate/classify a 64-byte CQE @cqe64 from @cq. Returns 0 on success,
 * nonzero otherwise (error-code convention not visible — body elided in
 * this view).
 */
static int mlx5hws_parse_cqe(struct mlx5hws_send_ring_cq *cq,
			     struct mlx5_cqe64 *cqe64)
{ … }
/* Poll @cq for a single completion. Return-value convention not visible
 * (body elided) — presumably 0 when a CQE was consumed, nonzero when the
 * queue is empty or on error; TODO confirm.
 */
static int mlx5hws_cq_poll_one(struct mlx5hws_send_ring_cq *cq)
{ … }
/* Drain completions from @queue's CQ, appending up to @res_nb results to
 * @res and advancing *@polled for each one reported. Body elided in this
 * view.
 */
static void hws_send_engine_poll_cq(struct mlx5hws_send_engine *queue,
				    struct mlx5hws_flow_op_result res[],
				    s64 *polled,
				    u32 res_nb)
{ … }
/* Harvest already-completed operations queued on @queue's internal list
 * (as opposed to the hardware CQ), filling @res up to @res_nb entries and
 * advancing *@polled. Body elided in this view — list semantics TODO
 * confirm.
 */
static void hws_send_engine_poll_list(struct mlx5hws_send_engine *queue,
				      struct mlx5hws_flow_op_result res[],
				      s64 *polled,
				      u32 res_nb)
{ … }
/* Poll @queue for up to @res_nb completed operations into @res.
 * Presumably combines hws_send_engine_poll_list() and
 * hws_send_engine_poll_cq(); return value convention not visible (body
 * elided) — likely the number of results or a negative errno, TODO confirm.
 */
static int hws_send_engine_poll(struct mlx5hws_send_engine *queue,
				struct mlx5hws_flow_op_result res[],
				u32 res_nb)
{ … }
/* Public entry: poll send queue @queue_id of @ctx for up to @res_nb
 * completed flow operations into @res. Body elided in this view — likely a
 * lookup-and-forward to hws_send_engine_poll(); return convention TODO
 * confirm.
 */
int mlx5hws_send_queue_poll(struct mlx5hws_context *ctx,
			    u16 queue_id,
			    struct mlx5hws_flow_op_result res[],
			    u32 res_nb)
{ … }
/* Allocate host memory/resources for send queue @sq on NUMA node
 * @numa_node, using device @mdev and firmware context @sqc_data. Returns 0
 * or a negative errno (body elided — confirm). Paired with
 * hws_send_ring_free_sq().
 */
static int hws_send_ring_alloc_sq(struct mlx5_core_dev *mdev,
				  int numa_node,
				  struct mlx5hws_send_engine *queue,
				  struct mlx5hws_send_ring_sq *sq,
				  void *sqc_data)
{ … }
/* Release host resources allocated by hws_send_ring_alloc_sq() for @sq.
 * Body elided in this view.
 */
static void hws_send_ring_free_sq(struct mlx5hws_send_ring_sq *sq)
{ … }
/* Create the SQ object in firmware (PD @pdn, context @sqc_data), bound to
 * completion queue @cq. Returns 0 or a negative errno (body elided —
 * confirm). Paired with hws_send_ring_destroy_sq().
 */
static int hws_send_ring_create_sq(struct mlx5_core_dev *mdev, u32 pdn,
				   void *sqc_data,
				   struct mlx5hws_send_engine *queue,
				   struct mlx5hws_send_ring_sq *sq,
				   struct mlx5hws_send_ring_cq *cq)
{ … }
/* Destroy the firmware SQ object created by hws_send_ring_create_sq().
 * Body elided in this view.
 */
static void hws_send_ring_destroy_sq(struct mlx5_core_dev *mdev,
				     struct mlx5hws_send_ring_sq *sq)
{ … }
/* Transition SQ @sqn to the ready (RDY) state via a MODIFY_SQ command on
 * @mdev. Returns 0 or a negative errno (body elided — confirm).
 */
static int hws_send_ring_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
{ … }
/* Tear down @sq: presumably destroys the firmware object and frees host
 * resources (inverse of hws_send_ring_open_sq()). Body elided in this view.
 */
static void hws_send_ring_close_sq(struct mlx5hws_send_ring_sq *sq)
{ … }
/* Create the firmware SQ and move it to the ready state — presumably
 * hws_send_ring_create_sq() followed by hws_send_ring_set_sq_rdy(); body
 * elided, confirm. Returns 0 or a negative errno.
 */
static int hws_send_ring_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
				       void *sqc_data,
				       struct mlx5hws_send_engine *queue,
				       struct mlx5hws_send_ring_sq *sq,
				       struct mlx5hws_send_ring_cq *cq)
{ … }
/* Fully open the send queue @sq for @queue on NUMA node @numa_node:
 * allocate host resources and create/ready the firmware object, bound to
 * @cq. Returns 0 or a negative errno (body elided — confirm). Inverse is
 * hws_send_ring_close_sq().
 */
static int hws_send_ring_open_sq(struct mlx5hws_context *ctx,
				 int numa_node,
				 struct mlx5hws_send_engine *queue,
				 struct mlx5hws_send_ring_sq *sq,
				 struct mlx5hws_send_ring_cq *cq)
{ … }
/* CQ completion callback registered with the mlx5 core (signature matches
 * mlx5_core_cq::comp). Body elided in this view — presumably a no-op or
 * event acknowledgment since this driver polls; TODO confirm.
 */
static void hws_cq_complete(struct mlx5_core_cq *mcq,
			    struct mlx5_eqe *eqe)
{ … }
/* Allocate host memory/resources for completion queue @cq on NUMA node
 * @numa_node using firmware context @cqc_data. Returns 0 or a negative
 * errno (body elided — confirm).
 */
static int hws_send_ring_alloc_cq(struct mlx5_core_dev *mdev,
				  int numa_node,
				  struct mlx5hws_send_engine *queue,
				  void *cqc_data,
				  struct mlx5hws_send_ring_cq *cq)
{ … }
/* Create the CQ object in firmware from @cqc_data for @cq. Returns 0 or a
 * negative errno (body elided — confirm).
 */
static int hws_send_ring_create_cq(struct mlx5_core_dev *mdev,
				   struct mlx5hws_send_engine *queue,
				   void *cqc_data,
				   struct mlx5hws_send_ring_cq *cq)
{ … }
/* Fully open completion queue @cq for @queue on NUMA node @numa_node:
 * allocate host resources then create the firmware object. Returns 0 or a
 * negative errno (body elided — confirm). Inverse is
 * hws_send_ring_close_cq().
 */
static int hws_send_ring_open_cq(struct mlx5_core_dev *mdev,
				 struct mlx5hws_send_engine *queue,
				 int numa_node,
				 struct mlx5hws_send_ring_cq *cq)
{ … }
/* Tear down completion queue @cq (inverse of hws_send_ring_open_cq()).
 * Body elided in this view.
 */
static void hws_send_ring_close_cq(struct mlx5hws_send_ring_cq *cq)
{ … }
/* Close @queue's send ring: presumably closes SQ then CQ (inverse of
 * mlx5hws_send_ring_open()). Body elided in this view — teardown order
 * TODO confirm.
 */
static void hws_send_ring_close(struct mlx5hws_send_engine *queue)
{ … }
/* Open the full send ring (CQ + SQ) for @queue within @ctx. Returns 0 or a
 * negative errno (body elided — confirm). Inverse is
 * hws_send_ring_close().
 */
static int mlx5hws_send_ring_open(struct mlx5hws_context *ctx,
				  struct mlx5hws_send_engine *queue)
{ … }
/* Public entry: close a single send @queue and release its ring resources.
 * Body elided in this view.
 */
void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue)
{ … }
/* Public entry: initialize @queue with capacity @queue_size and open its
 * send ring within @ctx. Returns 0 or a negative errno (body elided —
 * confirm). Inverse is mlx5hws_send_queue_close().
 */
int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
			    struct mlx5hws_send_engine *queue,
			    u16 queue_size)
{ … }
/* Close the first @queues send queues of @ctx. Double-underscore prefix
 * suggests an internal helper used for both full teardown and error-path
 * partial unwind — body elided, confirm against callers.
 */
static void __hws_send_queues_close(struct mlx5hws_context *ctx, u16 queues)
{ … }
/* Destroy the locks used by the binary-wildcard (BWC) queues of @ctx.
 * Body elided in this view — presumably a no-op when BWC mode is not
 * active; TODO confirm.
 */
static void hws_send_queues_bwc_locks_destroy(struct mlx5hws_context *ctx)
{ … }
/* Public entry: close all send queues of @ctx and release associated
 * resources (including BWC locks — see
 * hws_send_queues_bwc_locks_destroy()). Body elided in this view.
 */
void mlx5hws_send_queues_close(struct mlx5hws_context *ctx)
{ … }
/* Initialize binary-wildcard (BWC) support for @ctx's send queues
 * (presumably the per-queue locks). Returns 0 or a negative errno (body
 * elided — confirm).
 */
static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx)
{ … }
/* Public entry: open @queues send queues of size @queue_size on @ctx.
 * Returns 0 or a negative errno; on failure presumably unwinds any queues
 * already opened via __hws_send_queues_close() — body elided, confirm.
 */
int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
			     u16 queues,
			     u16 queue_size)
{ … }
/* Public entry: apply the action bitmask/enum @actions (e.g. drain/flush —
 * values not visible here) to send queue @queue_id of @ctx. Returns 0 or a
 * negative errno (body elided — confirm action semantics).
 */
int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
			      u16 queue_id,
			      u32 actions)
{ … }
/* Firmware-command (non-ring) path for writing STEs: submit the GTA WQE
 * described by @send_attr/@send_wqe_ctrl directly via @mdev under PD
 * @pd_num, with optional match data/tag and optional range data/tag (NULL
 * when unused — presumed; body elided, confirm). Used when the hardware
 * send ring cannot be used (see mlx5hws_send_stes_fw()). Returns 0 or a
 * negative errno.
 */
static int
hws_send_wqe_fw(struct mlx5_core_dev *mdev,
		u32 pd_num,
		struct mlx5hws_send_engine_post_attr *send_attr,
		struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl,
		void *send_wqe_match_data,
		void *send_wqe_match_tag,
		void *send_wqe_range_data,
		void *send_wqe_range_tag,
		bool is_jumbo,
		u8 gta_opcode)
{ … }
/* Firmware-path counterpart of mlx5hws_send_ste(): write the STEs described
 * by @ste_attr through firmware commands (hws_send_wqe_fw()) instead of the
 * hardware send ring. Body elided in this view — error reporting path TODO
 * confirm.
 */
void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx,
			  struct mlx5hws_send_engine *queue,
			  struct mlx5hws_send_ste_attr *ste_attr)
{ … }