/* linux/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c */

/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/mlx5.h"
#include "lib/devcom.h"
#include "lib/eq.h"
#include "lib/fs_chains.h"
#include "en_tc.h"
#include "en/mapping.h"
#include "devlink.h"
#include "lag/lag.h"
#include "en/tc/post_meter.h"

/* Iterate over every registered vport representor in @esw.
 * NOTE(review): the macro body was stripped in this snapshot; restored from
 * the upstream xarray-based iterator — confirm against the full tree.
 */
#define mlx5_esw_for_each_rep(esw, i, rep) \
	xa_for_each(&((esw)->offloads.vport_reps), i, rep)

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
/* NOTE(review): value stripped in this snapshot; 0 matches upstream, where
 * the uplink rep always occupies index 0.
 */
#define UPLINK_REP_INDEX 0

/* NOTE(review): all values below were stripped in this snapshot and are
 * restored from the upstream file — verify against the exact kernel version.
 */
#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS  4

#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)

#define MLX5_ESW_MAX_CTRL_EQS 4
#define MLX5_ESW_DEFAULT_SF_COMP_EQS 8

/* Namespace parameters for the per-vport mirror tables.
 * NOTE(review): the initializer was truncated to the invalid form "=;" in
 * this snapshot; upstream initializes .max_fte/.max_num_groups from the
 * vport-table macros. Left as a zero-initialized definition here so the
 * translation unit stays syntactically valid — restore the initializer from
 * the full tree.
 */
static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns;

/* Representor lookup and rule source-port helpers.
 * NOTE(review): all bodies in this extract are elided ({}); descriptions are
 * inferred from names/signatures — confirm against the full upstream file.
 */
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{}

/* Presumably tags @spec/@attr with the flow-source hint — TODO confirm. */
static void
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{}

/* Actually only the upper 16 bits of reg c0 need to be cleared, but the lower 16 bits
 * are not needed as well in the following process. So clear them all for simplicity.
 */
void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
{}

/* Presumably encodes the source vport (or its metadata) into @spec — TODO confirm. */
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_attr *attr,
				  struct mlx5_eswitch *src_esw,
				  u16 vport)
{}

/* Flow-destination setup/cleanup helpers used when building offloaded FDB
 * rules (chain, slow path, sampler, meter, indirect tables, vport dests).
 * NOTE(review): bodies elided in this extract; per-function descriptions are
 * inferred from names/signatures — confirm against the full upstream file.
 */
static int
esw_setup_decap_indir(struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr)
{}

static void
esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
			struct mlx5_flow_attr *attr)
{}

static int
esw_setup_mtu_dest(struct mlx5_flow_destination *dest,
		   struct mlx5e_meter_attr *meter,
		   int i)
{}

static int
esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
		       struct mlx5_flow_act *flow_act,
		       u32 sampler_id,
		       int i)
{}

static int
esw_setup_ft_dest(struct mlx5_flow_destination *dest,
		  struct mlx5_flow_act *flow_act,
		  struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr,
		  int i)
{}

static void
esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_fs_chains *chains, int i)
{}

static void
esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
			 struct mlx5_eswitch *esw, int i)
{}

static int
esw_setup_chain_dest(struct mlx5_flow_destination *dest,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_fs_chains *chains,
		     u32 chain, u32 prio, u32 level,
		     int i)
{}

/* Presumably releases dest tables for indices [from, to) — TODO confirm range semantics. */
static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
				     int from, int to)
{}

static bool
esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
{}

/* @i is in/out: presumably advanced past the destinations written — TODO confirm. */
static int
esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
				 struct mlx5_flow_act *flow_act,
				 struct mlx5_eswitch *esw,
				 struct mlx5_fs_chains *chains,
				 struct mlx5_flow_attr *attr,
				 int *i)
{}

static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
					       struct mlx5_flow_attr *attr)
{}

static bool
esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{}

static int
esw_setup_indir_table(struct mlx5_flow_destination *dest,
		      struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      int *i)
{}

static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{}

static void
esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
{}

static bool esw_same_vhca_id(struct mlx5_core_dev *mdev1, struct mlx5_core_dev *mdev2)
{}

static bool esw_setup_uplink_fwd_ipsec_needed(struct mlx5_eswitch *esw,
					      struct mlx5_esw_flow_attr *esw_attr,
					      int attr_idx)
{}

static bool esw_flow_dests_fwd_ipsec_check(struct mlx5_eswitch *esw,
					   struct mlx5_esw_flow_attr *esw_attr)
{}

static void
esw_setup_dest_fwd_vport(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
			 struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
			 int attr_idx, int dest_idx, bool pkt_reformat)
{}

static void
esw_setup_dest_fwd_ipsec(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
			 struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
			 int attr_idx, int dest_idx, bool pkt_reformat)
{}

static void
esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		     struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		     int attr_idx, int dest_idx, bool pkt_reformat)
{}

static int
esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		      int i)
{}

static bool
esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
{}

static bool
esw_dests_to_int_external(struct mlx5_flow_destination *dests, int max_dest)
{}

/* Top-level dispatcher that fills @dest from @attr; the helpers above are
 * presumably its workers — TODO confirm.
 */
static int
esw_setup_dests(struct mlx5_flow_destination *dest,
		struct mlx5_flow_act *flow_act,
		struct mlx5_eswitch *esw,
		struct mlx5_flow_attr *attr,
		struct mlx5_flow_spec *spec,
		int *i)
{}

static void
esw_cleanup_dests(struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr)
{}

static void
esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
{}

/* Public add/delete entry points for offloaded FDB rules.
 * NOTE(review): bodies elided in this extract — confirm semantics against
 * the full upstream file.
 */
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr)
{}

struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr)
{}

/* Shared teardown; @fwd_rule presumably distinguishes the two add paths above. */
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr,
			bool fwd_rule)
{}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr)
{}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr)
{}

/* Exported API: install a send-to-vport rule for SQ number @sqn.
 * NOTE(review): body elided in this extract. The EXPORT_SYMBOL() argument
 * was stripped; per kernel convention it must name the definition
 * immediately above it, restored accordingly.
 */
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
				    struct mlx5_eswitch *from_esw,
				    struct mlx5_eswitch_rep *rep,
				    u32 sqn)
{}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

/* Send-to-vport rule teardown, metadata passing, and peer/miss rule setup.
 * NOTE(review): bodies elided in this extract — confirm against the full
 * upstream file.
 */
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{}

void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule)
{}

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num)
{}

static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{}

static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{}

static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{}

static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
					struct mlx5_core_dev *peer_dev)
{}

static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{}

/* Presumably installs the restore rule matching reg_c chain/tag @tag — TODO confirm. */
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{}

/* NOTE(review): values stripped in this snapshot; restored from upstream —
 * verify against the exact kernel version.
 */
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

/* Presumably configures source-port matching in @flow_group_in according to
 * @match_params. NOTE(review): body elided in this extract — confirm.
 */
void
mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
				    u32 *flow_group_in,
				    int match_params)
{}

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
/* Per-vport table refcounting, only built with TC classifier-action support.
 * NOTE(review): bodies elided in this extract — confirm.
 */
static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
{}

static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
{}

/* NOTE(review): macro body stripped in this snapshot; restored from the
 * upstream capability check — verify against the full tree.
 */
#define fdb_modify_header_fwd_to_table_supported(esw) \
	(MLX5_CAP_ESW_FLOWCONTROL((esw)->dev, fdb_modify_header_fwd_to_table))
/* fs_chains setup for the offloads FDB; the #else branch provides no-op
 * fallbacks when CONFIG_MLX5_CLS_ACT is disabled.
 * NOTE(review): bodies elided in this extract — confirm.
 */
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
{}

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{}

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{}

#else /* CONFIG_MLX5_CLS_ACT */

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{ return 0; }

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{}

#endif

/* Creation/teardown of the offloads FDB flow groups and tables. @ix appears
 * to be an in/out running flow index shared across the group creators —
 * TODO confirm. NOTE(review): bodies elided in this extract.
 */
static int
esw_create_send_to_vport_group(struct mlx5_eswitch *esw,
			       struct mlx5_flow_table *fdb,
			       u32 *flow_group_in,
			       int *ix)
{}

static int
esw_create_meta_send_to_vport_group(struct mlx5_eswitch *esw,
				    struct mlx5_flow_table *fdb,
				    u32 *flow_group_in,
				    int *ix)
{}

static int
esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
			       struct mlx5_flow_table *fdb,
			       u32 *flow_group_in,
			       int *ix)
{}

static int
esw_create_miss_group(struct mlx5_eswitch *esw,
		      struct mlx5_flow_table *fdb,
		      u32 *flow_group_in,
		      int *ix)
{}

static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{}

/* Offloads/RX tables, RX groups, drop rules and restore table management.
 * NOTE(review): bodies elided in this extract — confirm against upstream.
 */
static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
{}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{}

static int esw_create_vport_rx_drop_rule_index(struct mlx5_eswitch *esw)
{}

static int esw_create_vport_rx_drop_group(struct mlx5_eswitch *esw)
{}

static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
{}

void
mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
			      u16 vport,
			      struct mlx5_flow_spec *spec)
{}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{}

static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
{}

static void esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch *esw)
{}

static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{}

static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{}

static int esw_create_restore_table(struct mlx5_eswitch *esw)
{}

/* Switch to offloads mode, representor init/cleanup, and devlink
 * esw_port_metadata parameter handlers.
 * NOTE(review): bodies elided in this extract — confirm against upstream.
 */
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{}

static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
{}

static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
					  struct mlx5_eswitch_rep *rep)
{}

static void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{}

static int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{}

static int esw_port_metadata_set(struct devlink *devlink, u32 id,
				 struct devlink_param_gset_ctx *ctx,
				 struct netlink_ext_ack *extack)
{}

static int esw_port_metadata_get(struct devlink *devlink, u32 id,
				 struct devlink_param_gset_ctx *ctx)
{}

static int esw_port_metadata_validate(struct devlink *devlink, u32 id,
				      union devlink_param_value val,
				      struct netlink_ext_ack *extack)
{}

static const struct devlink_param esw_devlink_params[] =;

/* Offloads subsystem init/cleanup entry points.
 * NOTE(review): bodies elided in this extract — confirm against upstream.
 */
int esw_offloads_init(struct mlx5_eswitch *esw)
{}

void esw_offloads_cleanup(struct mlx5_eswitch *esw)
{}

/* Representor load/unload paths for PF/VF/SF vports.
 * NOTE(review): bodies elided in this extract — confirm against upstream.
 */
static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
				   struct mlx5_eswitch_rep *rep, u8 rep_type)
{}

static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{}

static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{}

static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{}

int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{}

void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{}

int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
				  struct mlx5_devlink_port *dl_port,
				  u32 controller, u32 sfnum)
{}

void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{}

int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{}

void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{}

/* Shared-FDB (master/slave) wiring used for LAG: root FDB redirection and
 * master egress rules. NOTE(review): bodies elided in this extract —
 * confirm against upstream.
 */
static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
				  struct mlx5_core_dev *slave)
{}

static int __esw_set_master_egress_rule(struct mlx5_core_dev *master,
					struct mlx5_core_dev *slave,
					struct mlx5_vport *vport,
					struct mlx5_flow_table *acl)
{}

static int esw_master_egress_create_resources(struct mlx5_eswitch *esw,
					      struct mlx5_flow_namespace *egress_ns,
					      struct mlx5_vport *vport, size_t count)
{}

static void esw_master_egress_destroy_resources(struct mlx5_vport *vport)
{}

static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
				      struct mlx5_core_dev *slave, size_t count)
{}

static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev,
					 struct mlx5_core_dev *slave_dev)
{}

int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					     struct mlx5_eswitch *slave_esw, int max_slaves)
{}

void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw)
{}

/* Devcom event codes for eswitch pairing. NOTE(review): values stripped in
 * this snapshot; restored from upstream — verify.
 */
#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

/* Devcom-based pairing of two eswitches (e.g. for multi-port/LAG setups).
 * NOTE(review): bodies elided in this extract — confirm against upstream.
 */
static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw)
{}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw,
				     struct mlx5_eswitch *peer_esw)
{}

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{}

static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
					 struct mlx5_eswitch *peer_esw,
					 bool pair)
{}

static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{}

void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
{}

void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{}

bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw)
{}

bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{}

/* NOTE(review): value stripped in this snapshot; restored from upstream
 * (one metadata value reserved for the uplink) — verify.
 */
#define MLX5_ESW_METADATA_RSVD_UPLINK (1)

/* Share the same metadata for uplink's. This is fine because:
 * (a) In shared FDB mode (LAG) both uplink's are treated the
 *     same and tagged with the same metadata.
 * (b) In non shared FDB mode, packets from physical port0
 *     cannot hit eswitch of PF1 and vice versa.
 */
static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw)
{}

/* Per-vport match-metadata allocation and setup.
 * NOTE(review): bodies elided in this extract — confirm against upstream.
 */
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{}

void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
{}

static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{}

static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
						struct mlx5_vport *vport)
{}

static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
{}

static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
{}

/* Per-vport ACL tables, steering init/cleanup, and FW "functions changed"
 * event handling. NOTE(review): bodies elided in this extract — confirm
 * against upstream.
 */
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{}

void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport)
{}

static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
{}

static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
{}

int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
{}

static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{}

static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{}

static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{}

static void esw_functions_changed_event_handler(struct work_struct *work)
{}

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{}

static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
{}

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller)
{}

/* Offloads mode enable/disable and devlink<->mlx5 mode conversions.
 * NOTE(review): bodies elided in this extract — confirm against upstream.
 */
int esw_offloads_enable(struct mlx5_eswitch *esw)
{}

static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{}

void esw_offloads_disable(struct mlx5_eswitch *esw)
{}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{}

int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
{}

void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev)
{}

/* Devlink eswitch mode / inline-mode / encap get-set entry points.
 * NOTE(review): bodies elided in this extract — confirm against upstream.
 */
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{}

static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
				      struct netlink_ext_ack *extack)
{}

int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{}

bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
{}

void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{}

int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{}

/* Presumably true when @vport_num has a registered representor.
 * NOTE(review): body elided in this extract — confirm.
 */
static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{}

/* Exported API: register representor ops for @rep_type on all vports.
 * NOTE(review): body elided in this extract. EXPORT_SYMBOL() argument was
 * stripped; restored to name the definition immediately above, per kernel
 * convention.
 */
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);

/* Exported API: unregister representor ops for @rep_type.
 * NOTE(review): body elided; EXPORT_SYMBOL() argument restored per kernel
 * convention (names the preceding definition).
 */
void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);

/* Presumably returns the uplink representor's private data for @rep_type.
 * NOTE(review): body elided in this extract — confirm.
 */
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{}

/* Exported API. NOTE(review): body elided; EXPORT_SYMBOL() argument restored
 * per kernel convention (names the preceding definition).
 */
void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

/* Exported API. NOTE(review): body elided; EXPORT_SYMBOL() argument restored
 * per kernel convention.
 */
void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

/* Exported API. NOTE(review): body elided; EXPORT_SYMBOL() argument restored
 * per kernel convention.
 */
struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);

/* Exported API. NOTE(review): body elided; EXPORT_SYMBOL() argument restored
 * per kernel convention.
 */
bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{}
EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);

/* Exported API. NOTE(review): body elided; EXPORT_SYMBOL() argument restored
 * per kernel convention.
 */
bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);

/* Exported API. NOTE(review): body elided; EXPORT_SYMBOL() argument restored
 * per kernel convention.
 */
u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
					      u16 vport_num)
{}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);

/* vport <-> vhca_id mapping maintenance.
 * NOTE(review): bodies elided in this extract — confirm against upstream.
 */
static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
{}

int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
{}

void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
{}

int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
{}

/* Exported API. NOTE(review): body elided; EXPORT_SYMBOL() argument restored
 * per kernel convention.
 */
u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
					    u16 vport_num)
{}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);

/* Devlink port-function attribute handlers (hw_addr, migratable, RoCE) and
 * IPsec rule restore. NOTE(review): bodies elided in this extract — confirm
 * against upstream.
 */
int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
				     u8 *hw_addr, int *hw_addr_len,
				     struct netlink_ext_ack *extack)
{}

int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
				     const u8 *hw_addr, int hw_addr_len,
				     struct netlink_ext_ack *extack)
{}

int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
					struct netlink_ext_ack *extack)
{}

int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
					struct netlink_ext_ack *extack)
{}

int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
				  struct netlink_ext_ack *extack)
{}

int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
				  struct netlink_ext_ack *extack)
{}

int
mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *esw_attr, int attr_idx)
{}

#ifdef CONFIG_XFRM_OFFLOAD
/* Devlink port-function IPsec crypto/packet offload toggles, built only with
 * XFRM offload support. NOTE(review): bodies elided in this extract.
 */
int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
					  struct netlink_ext_ack *extack)
{}

int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
					  struct netlink_ext_ack *extack)
{}

int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
					  struct netlink_ext_ack *extack)
{}

int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port,
					  bool enable,
					  struct netlink_ext_ack *extack)
{}
#endif /* CONFIG_XFRM_OFFLOAD */

/* Devlink port-function max_io_eqs get/set handlers.
 * NOTE(review): bodies elided in this extract — confirm against upstream.
 */
int
mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port, u32 *max_io_eqs,
				    struct netlink_ext_ack *extack)
{}

int
mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs,
				    struct netlink_ext_ack *extack)
{}

int
mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port,
					       struct netlink_ext_ack *extack)
{}