/* linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c */

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */

#include "mlx5hws_internal.h"

/* Per its name: map an argument data size (bytes) to the matching
 * mlx5hws_arg_chunk_size log2 bucket — TODO(review) confirm against callers.
 * NOTE(review): empty stub — a non-void function that falls off the end is
 * undefined behavior when the caller uses the result; implementation missing.
 */
enum mlx5hws_arg_chunk_size
mlx5hws_arg_data_size_to_arg_log_size(u16 data_size)
{}

/* Per its name: convert a data size to the allocated argument size in bytes —
 * TODO(review) confirm. NOTE(review): empty stub; non-void return is UB if used.
 */
u32 mlx5hws_arg_data_size_to_arg_size(u16 data_size)
{}

/* Per its name: map a modify-header action count to the log2 argument chunk
 * size — TODO(review) confirm. NOTE(review): empty stub; non-void return is
 * UB if used.
 */
enum mlx5hws_arg_chunk_size
mlx5hws_arg_get_arg_log_size(u16 num_of_actions)
{}

/* Per its name: argument size in bytes for num_of_actions — TODO(review)
 * confirm. NOTE(review): empty stub; non-void return is UB if used.
 */
u32 mlx5hws_arg_get_arg_size(u16 num_of_actions)
{}

/* Per its name: decide whether the given action pattern forces a packet
 * reparse — TODO(review) confirm which action types trigger it.
 * NOTE(review): empty stub; non-void return is UB if used.
 */
bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions)
{}

/* Cache and cache element handling */
/* Cache and cache element handling */

/* Allocate/initialize the pattern cache and return it via *cache; returns
 * 0 on success, negative errno otherwise — presumed from the int/out-param
 * convention, TODO(review) confirm. NOTE(review): empty stub; non-void
 * return is UB if used.
 */
int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache)
{}

/* Counterpart of mlx5hws_pat_init_pattern_cache(): tear down and free the
 * cache — TODO(review) confirm ownership. NOTE(review): empty stub.
 */
void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache)
{}

/* Per its name: compare two action patterns for equality (count + contents)
 * — TODO(review) confirm exact match semantics. NOTE(review): empty stub;
 * non-void return is UB if used.
 */
static bool mlx5hws_pat_compare_pattern(int cur_num_of_actions,
					__be64 cur_actions[],
					int num_of_actions,
					__be64 actions[])
{}

/* Look up a cached pattern matching (num_of_actions, actions); presumably
 * returns NULL on miss — TODO(review) confirm. NOTE(review): empty stub;
 * non-void return is UB if used.
 */
static struct mlx5hws_pattern_cache_item *
mlx5hws_pat_find_cached_pattern(struct mlx5hws_pattern_cache *cache,
				u16 num_of_actions,
				__be64 *actions)
{}

/* Per its name: fetch an existing cache entry for this pattern, presumably
 * taking a reference on hit — TODO(review) confirm refcount semantics.
 * NOTE(review): empty stub; non-void return is UB if used.
 */
static struct mlx5hws_pattern_cache_item *
mlx5hws_pat_get_existing_cached_pattern(struct mlx5hws_pattern_cache *cache,
					u16 num_of_actions,
					__be64 *actions)
{}

/* Per its name: insert a new (pattern_id, actions) entry into the cache and
 * return it; presumably NULL on allocation failure — TODO(review) confirm.
 * NOTE(review): empty stub; non-void return is UB if used.
 */
static struct mlx5hws_pattern_cache_item *
mlx5hws_pat_add_pattern_to_cache(struct mlx5hws_pattern_cache *cache,
				 u32 pattern_id,
				 u16 num_of_actions,
				 __be64 *actions)
{}

/* Look up a cache entry by its pattern object id rather than by contents —
 * presumably NULL on miss, TODO(review) confirm. NOTE(review): empty stub;
 * non-void return is UB if used.
 */
static struct mlx5hws_pattern_cache_item *
mlx5hws_pat_find_cached_pattern_by_id(struct mlx5hws_pattern_cache *cache,
				      u32 ptrn_id)
{}

/* Unlink and free a cache entry — TODO(review) confirm whether the backing
 * FW pattern object is also destroyed here or by the caller.
 * NOTE(review): empty stub.
 */
static void
mlx5hws_pat_remove_pattern(struct mlx5hws_pattern_cache_item *cached_pattern)
{}

/* Release a reference on the cached pattern identified by ptrn_id,
 * presumably destroying it when the refcount drops to zero — pairs with
 * mlx5hws_pat_get_pattern(), TODO(review) confirm. NOTE(review): empty stub.
 */
void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx, u32 ptrn_id)
{}

/* Acquire a pattern object for the given action list, returning its id via
 * *pattern_id; presumably reuses a cached entry or creates a new one.
 * Returns 0 on success, negative errno otherwise — presumed convention,
 * TODO(review) confirm. Release with mlx5hws_pat_put_pattern().
 * NOTE(review): empty stub; non-void return is UB if used.
 */
int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
			    __be64 *pattern, size_t pattern_sz,
			    u32 *pattern_id)
{}

/* Fill a send-engine post attribute struct for an argument write targeting
 * arg_idx — TODO(review) confirm field meanings.
 * NOTE(review): prefix "mlx5d_" is inconsistent with the file's "mlx5hws_"
 * convention — probable typo worth renaming (static, so callers are local).
 * NOTE(review): empty stub.
 */
static void
mlx5d_arg_init_send_attr(struct mlx5hws_send_engine_post_attr *send_attr,
			 void *comp_data,
			 u32 arg_idx)
{}

/* Per its name: post a write of decap-L3 argument data to the argument
 * object at arg_idx via the send queue — TODO(review) confirm layout
 * handling. NOTE(review): empty stub.
 */
void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue,
			       u32 arg_idx,
			       u8 *arg_data,
			       u16 num_of_actions)
{}

/* Per its name: post data_size bytes of arg_data to the argument object at
 * arg_idx on the given send queue; comp_data is presumably attached to the
 * completion — TODO(review) confirm. NOTE(review): empty stub.
 */
void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
		       void *comp_data,
		       u32 arg_idx,
		       u8 *arg_data,
		       size_t data_size)
{}

/* Synchronous variant: write argument data through a context-owned queue
 * rather than a caller-supplied one — presumed from signature, TODO(review)
 * confirm. Returns 0 on success, negative errno otherwise — presumed
 * convention. NOTE(review): empty stub; non-void return is UB if used.
 */
int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
				      u32 arg_idx,
				      u8 *arg_data,
				      size_t data_size)
{}

/* Validate that arg_size is within the device's supported argument size
 * range for this context — presumed from name, TODO(review) confirm caps
 * checked. NOTE(review): empty stub; non-void return is UB if used.
 */
bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx,
					   u32 arg_size)
{}

/* Create an argument object (bulk of 2^log_bulk_sz) and return its id via
 * *arg_id; when write_data is true, presumably also writes the initial
 * data — TODO(review) confirm. Returns 0 on success, negative errno
 * otherwise — presumed convention. Destroy with mlx5hws_arg_destroy().
 * NOTE(review): empty stub; non-void return is UB if used.
 */
int mlx5hws_arg_create(struct mlx5hws_context *ctx,
		       u8 *data,
		       size_t data_sz,
		       u32 log_bulk_sz,
		       bool write_data,
		       u32 *arg_id)
{}

/* Counterpart of mlx5hws_arg_create(): destroy the argument object arg_id.
 * NOTE(review): empty stub.
 */
void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id)
{}

/* Convenience wrapper around argument creation for modify-header action
 * data (__be64 actions instead of raw bytes) — presumed from signature
 * parallel to mlx5hws_arg_create(), TODO(review) confirm. Returns 0 on
 * success, negative errno otherwise — presumed convention.
 * NOTE(review): empty stub; non-void return is UB if used.
 */
int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx,
					 __be64 *data,
					 u8 num_of_actions,
					 u32 log_bulk_sz,
					 bool write_data,
					 u32 *arg_id)
{}

/* Validate a single modify-header action's field against device/driver
 * limitations — presumed from name, TODO(review) confirm which fields are
 * rejected. NOTE(review): empty stub; non-void return is UB if used.
 */
static int
hws_action_modify_check_field_limitation(u8 action_type, __be64 *pattern)
{}

/* NOTE(review): INVALID_FIELD expands to nothing — any use as a value
 * (e.g. "*field = INVALID_FIELD;") will not compile. It almost certainly
 * needs a sentinel value (such as 0xffff for a u16 field) — confirm against
 * the original implementation.
 */
#define INVALID_FIELD

/* Extract the source and destination field ids touched by a modify-header
 * action into *src_field / *dst_field — presumed from name, TODO(review)
 * confirm, including whether unused slots get INVALID_FIELD.
 * NOTE(review): empty stub.
 */
static void
hws_action_modify_get_target_fields(u8 action_type, __be64 *pattern,
				    u16 *src_field, u16 *dst_field)
{}

/* Verify that every action in the pattern (sz bytes) is supported —
 * presumed from name, likely iterating with
 * hws_action_modify_check_field_limitation(); TODO(review) confirm.
 * NOTE(review): empty stub; non-void return is UB if used.
 */
bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz)
{}

/* Compute a padded copy of the pattern into new_pat, presumably inserting
 * NOP actions where required by hardware; outputs the new action count via
 * *new_size and a bitmap/record of inserted NOPs via *nope_location —
 * presumed from parameter names, TODO(review) confirm semantics and the
 * max_actions overflow behavior. NOTE(review): empty stub.
 */
void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions,
			   size_t max_actions, size_t *new_size,
			   u32 *nope_location, __be64 *new_pat)
{}