/* linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h */

/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */

#ifndef MLX5HWS_SEND_H_
#define MLX5HWS_SEND_H_

/* A single operation requires at least two WQEBBs, which caps a rule at
 * 16 such operations — i.e. 16 operations * 2 WQEBBs = 32 WQEs per rule.
 */
#define MAX_WQES_PER_RULE 32

/* NOTE(review): the enumerator lists below appear stripped from this copy.
 * An empty enumerator list is a constraint violation in ISO C (C11
 * 6.7.2.2p1), so none of these will compile as-is — restore the original
 * enumerators (WQE opcodes/opmods and GTA opcode/opmod/size constants)
 * from the upstream driver before building.
 */
enum mlx5hws_wqe_opcode {};

enum mlx5hws_wqe_opmod {};

enum mlx5hws_wqe_gta_opcode {};

enum mlx5hws_wqe_gta_opmod {};

enum mlx5hws_wqe_gta_sz {};

/* NOTE(review): every struct body below is empty in this copy — the
 * members appear stripped. Empty struct definitions are a GNU extension
 * (invalid ISO C, C11 6.7.2.1p8) and sizeof such a struct is 0 under GCC,
 * so any code computing WQE/segment sizes from these types will be wrong.
 * Restore the member lists from the upstream driver; they cannot be
 * reconstructed from this file alone.
 */

/* WQE Control segment. */
struct mlx5hws_wqe_ctrl_seg {};

struct mlx5hws_wqe_gta_ctrl_seg {};

struct mlx5hws_wqe_gta_data_seg_ste {};

struct mlx5hws_wqe_gta_data_seg_arg {};

struct mlx5hws_wqe_gta {};

struct mlx5hws_send_ring_cq {};

struct mlx5hws_send_ring_priv {};

struct mlx5hws_send_ring_dep_wqe {};

struct mlx5hws_send_ring_sq {};

struct mlx5hws_send_ring {};

struct mlx5hws_completed_poll_entry {};

struct mlx5hws_completed_poll {};

struct mlx5hws_send_engine {};

struct mlx5hws_send_engine_post_ctrl {};

struct mlx5hws_send_engine_post_attr {};

struct mlx5hws_send_ste_attr {};

struct mlx5hws_send_ring_dep_wqe *
mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue);

void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue);

void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue);

void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue);

int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
			    struct mlx5hws_send_engine *queue,
			    u16 queue_size);

void mlx5hws_send_queues_close(struct mlx5hws_context *ctx);

int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
			     u16 queues,
			     u16 queue_size);

int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
			      u16 queue_id,
			      u32 actions);

int mlx5hws_send_test(struct mlx5hws_context *ctx,
		      u16 queues,
		      u16 queue_size);

struct mlx5hws_send_engine_post_ctrl
mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue);

void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl,
				      char **buf, size_t *len);

void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl,
				  struct mlx5hws_send_engine_post_attr *attr);

void mlx5hws_send_ste(struct mlx5hws_send_engine *queue,
		      struct mlx5hws_send_ste_attr *ste_attr);

void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx,
			  struct mlx5hws_send_engine *queue,
			  struct mlx5hws_send_ste_attr *ste_attr);

void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue);

/* Presumably reports whether @queue has no outstanding work — confirm
 * against the upstream driver.
 * NOTE(review): body stripped in this copy. A non-void function that
 * falls off the end of its body yields undefined behavior if the caller
 * uses the result (C11 6.9.1p12) — restore the original implementation.
 */
static inline bool mlx5hws_send_engine_empty(struct mlx5hws_send_engine *queue)
{}

/* Presumably reports whether @queue has reached its capacity threshold —
 * confirm against the upstream driver.
 * NOTE(review): body stripped in this copy. Falling off the end of a
 * non-void function is UB when the result is used (C11 6.9.1p12) —
 * restore the original implementation.
 */
static inline bool mlx5hws_send_engine_full(struct mlx5hws_send_engine *queue)
{}

/* Presumably bumps the queue's in-flight rule accounting — confirm
 * against the upstream driver.
 * NOTE(review): body stripped in this copy — as written this is a silent
 * no-op, so queue occupancy tracking would never advance. Restore the
 * original implementation.
 */
static inline void mlx5hws_send_engine_inc_rule(struct mlx5hws_send_engine *queue)
{}

/* Presumably the counterpart of mlx5hws_send_engine_inc_rule, releasing
 * one unit of in-flight rule accounting — confirm against the upstream
 * driver.
 * NOTE(review): body stripped in this copy — currently a silent no-op.
 * Restore the original implementation.
 */
static inline void mlx5hws_send_engine_dec_rule(struct mlx5hws_send_engine *queue)
{}

/* Presumably records a software-generated completion (@comp_status plus
 * the caller's @user_data cookie) for later retrieval by the poller —
 * confirm against the upstream driver.
 * NOTE(review): body stripped in this copy — as written, completions are
 * silently dropped and pollers would never observe them. Restore the
 * original implementation.
 */
static inline void mlx5hws_send_engine_gen_comp(struct mlx5hws_send_engine *queue,
						void *user_data,
						int comp_status)
{}

/* Presumably reports whether @queue is in an error state — confirm
 * against the upstream driver.
 * NOTE(review): body stripped in this copy. Using the result of a
 * non-void function whose body falls off the end is UB (C11 6.9.1p12) —
 * restore the original implementation.
 */
static inline bool mlx5hws_send_engine_err(struct mlx5hws_send_engine *queue)
{}

#endif /* MLX5HWS_SEND_H_ */