linux/include/linux/mlx5/driver.h

/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/refcount.h>
#include <linux/auxiliary_bus.h>
#include <linux/mutex.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/eq.h>
#include <linux/timecounter.h>
#include <linux/ptp_clock_kernel.h>
#include <net/devlink.h>

#define MLX5_ADEV_NAME

#define MLX5_IRQ_EQ_CTRL

enum {};

enum {};

enum {};

enum mlx5_sqp_t {};

enum {};

enum {};

enum {};

enum mlx5_qpts_trust_state {};

enum mlx5_dcbx_oper_mode {};

enum {};

enum mlx5_page_fault_resume_flags {};

enum dbg_rsc_type {};

enum port_state_policy {};

enum mlx5_coredev_type {};

struct mlx5_field_desc {};

struct mlx5_rsc_debug {};

enum mlx5_dev_event {};

enum mlx5_port_status {};

enum mlx5_cmdif_state {};

struct mlx5_cmd_first {};

struct mlx5_cmd_msg {};

struct mlx5_cmd_debug {};

struct cmd_msg_cache {};

enum {};

struct mlx5_cmd_stats {};

struct mlx5_cmd {};

struct mlx5_cmd_mailbox {};

struct mlx5_buf_list {};

struct mlx5_frag_buf {};

struct mlx5_frag_buf_ctrl {};

struct mlx5_core_psv {};

struct mlx5_core_sig_ctx {};

#define MLX5_24BIT_MASK

enum mlx5_res_type {};

struct mlx5_core_rsc_common {};

struct mlx5_uars_page {};

struct mlx5_bfreg_head {};

struct mlx5_bfreg_data {};

struct mlx5_sq_bfreg {};

struct mlx5_core_health {};

enum {};

struct mlx5_vf_context {};

struct mlx5_core_sriov {};

struct mlx5_fc_pool {};

struct mlx5_fc_stats {};

struct mlx5_events;
struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
struct mlx5_devcom_dev;
struct mlx5_fw_reset;
struct mlx5_eq_table;
struct mlx5_irq_table;
struct mlx5_vhca_state_notifier;
struct mlx5_sf_dev_table;
struct mlx5_sf_hw_table;
struct mlx5_sf_table;
struct mlx5_crypto_dek_priv;

struct mlx5_rate_limit {};

struct mlx5_rl_entry {};

struct mlx5_rl_table {};

struct mlx5_core_roce {};

enum {};

struct mlx5_adev {};

struct mlx5_debugfs_entries {};

enum mlx5_func_type {};

struct mlx5_ft_pool;
struct mlx5_priv {};

enum mlx5_device_state {};

enum mlx5_interface_state {};

enum mlx5_pci_status {};

enum mlx5_pagefault_type_flags {};

struct mlx5_td {};

struct mlx5e_resources {};

enum mlx5_sw_icm_type {};

#define MLX5_MAX_RESERVED_GIDS

struct mlx5_rsvd_gids {};

#define MAX_PIN_NUM
struct mlx5_pps {};

struct mlx5_timer {};

struct mlx5_clock {};

struct mlx5_dm;
struct mlx5_fw_tracer;
struct mlx5_vxlan;
struct mlx5_geneve;
struct mlx5_hv_vhca;

#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)
#define MLX5_SW_ICM_BLOCK_SIZE(dev)

enum {};

enum {};

struct mlx5_profile {};

struct mlx5_hca_cap {};

enum mlx5_wc_state {};

struct mlx5_core_dev {};

struct mlx5_db {};

enum {};

enum {};

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

enum {};

struct mlx5_cmd_work_ent {};

enum phy_port_state {};

struct mlx5_hca_vport_context {};

#define STRUCT_FIELD(header, field)

extern struct dentry *mlx5_debugfs_root;

static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{}

static inline u32 mlx5_base_mkey(const u32 key)
{}

static inline u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
{}

static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
					u8 log_stride, u8 log_sz,
					u16 strides_offset,
					struct mlx5_frag_buf_ctrl *fbc)
{}

static inline void mlx5_init_fbc(struct mlx5_buf_list *frags,
				 u8 log_stride, u8 log_sz,
				 struct mlx5_frag_buf_ctrl *fbc)
{}

static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
					  u32 ix)
{}

static inline u32
mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
{}

enum {};

void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);

struct mlx5_async_ctx {};

struct mlx5_async_work;

typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);

struct mlx5_async_work {};

void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
			     struct mlx5_async_ctx *ctx);
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work);
void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out);
int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size);
int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);

#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out)

#define mlx5_cmd_exec_in(dev, ifc_cmd, in)

int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size);
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);

void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev);
void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev);

void mlx5_core_mp_event_replay(struct mlx5_core_dev *dev, u32 event, void *data);

void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in,
			  int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, u32 mkey, u32 *out,
			 int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);

void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);

struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in,
		    void *data_out, int size_out, u16 reg_id, int arg,
		    int write, bool verbose);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);

int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
		       int node);

static inline int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
{}

void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);

const char *mlx5_command_str(int command);
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
__be32 mlx5_core_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);

int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
		     struct mlx5_rate_limit *rl);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
			 bool dedicated_entry, u16 *index);
void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index);
bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
		       struct mlx5_rate_limit *rl_1);
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);

unsigned int mlx5_comp_vectors_max(struct mlx5_core_dev *dev);
int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
			   u8 roce_version, u8 roce_l3_type, const u8 *gid,
			   const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);

static inline u32 mlx5_mkey_to_idx(u32 mkey)
{}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{}

static inline u8 mlx5_mkey_variant(u32 mkey)
{}

/* Async-atomic event notifier used by mlx5 core to forward FW
 * events received from the event queue to mlx5 consumers.
 * Optimises event queue dispatching. See the usage sketch below.
 */
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
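
/*
 * Consumer-side usage sketch for the notifier API above (illustrative only;
 * the handler name and the enclosing consumer state are assumptions, not
 * part of this header):
 *
 *	static int my_fw_event(struct notifier_block *nb,
 *			       unsigned long event, void *data)
 *	{
 *		// 'event' carries the FW event type, 'data' the EQE payload.
 *		return NOTIFY_OK;
 *	}
 *
 *	// At consumer init:  my_nb.notifier_call = my_fw_event;
 *	//                    mlx5_notifier_register(mdev, &my_nb);
 *	// At teardown:       mlx5_notifier_unregister(mdev, &my_nb);
 */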

/* Async-atomic event notifier used for forwarding
 * events from the event queue to the mlx5 events dispatcher,
 * eswitch, clock and others.
 */
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
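
/*
 * Core-internal usage sketch for the EQ notifier API above, using
 * struct mlx5_nb and MLX5_NB_INIT() from <linux/mlx5/eq.h> (included above).
 * The handler name and the chosen event type are illustrative assumptions:
 *
 *	static int my_pps_event(struct notifier_block *nb,
 *				unsigned long type, void *data)
 *	{
 *		struct mlx5_eqe *eqe = data;
 *
 *		// Decode eqe->data for the subscribed event type.
 *		return NOTIFY_OK;
 *	}
 *
 *	// MLX5_NB_INIT(&my_nb, my_pps_event, PPS_EVENT);
 *	// mlx5_eq_notifier_register(dev, &my_nb);
 */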

/* Blocking event notifier used to forward SW events along the slow path */
int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
				      void *data);
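
/*
 * Usage sketch for the blocking (sleepable, slow-path) notifier above.
 * Producers raise SW events with mlx5_blocking_notifier_call_chain();
 * consumers register a standard notifier_block. The handler and event
 * names below are illustrative assumptions:
 *
 *	static int my_sw_event(struct notifier_block *nb,
 *			       unsigned long event, void *data)
 *	{
 *		// May sleep; invoked from the blocking notifier chain.
 *		return NOTIFY_OK;
 *	}
 *
 *	// mlx5_blocking_notifier_register(dev, &my_nb);
 *	// ...
 *	// mlx5_blocking_notifier_call_chain(dev, MY_SW_EVENT, payload);
 */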

int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
			   struct net_device *slave);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets);
struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int *i);

#define mlx5_lag_for_each_peer_mdev(dev, peer, i)

u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
			 u64 length, u32 log_alignment, u16 uid,
			 phys_addr_t *addr, u32 *obj_id);
int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
			   u64 length, u16 uid, phys_addr_t addr, u32 obj_id);

struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev);
void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev);

int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev,
					  int vf_id,
					  struct notifier_block *nb);
void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev,
					     int vf_id,
					     struct notifier_block *nb);
int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
			    struct ib_device *device,
			    struct rdma_netdev_alloc_params *params);

enum {};

static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev)
{}

static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
{}

static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
{}

static inline bool
mlx5_core_is_ecpf_esw_manager(const struct mlx5_core_dev *dev)
{}

static inline bool mlx5_ecpf_vport_exists(const struct mlx5_core_dev *dev)
{}

static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev)
{}

static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
{}

static inline u16 mlx5_core_max_ec_vfs(const struct mlx5_core_dev *dev)
{}

static inline int mlx5_get_gid_table_len(u16 param)
{}

static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{}

static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
{}

static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
{}

static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
{}

static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
{}

static inline int mlx5_get_dev_index(struct mlx5_core_dev *dev)
{}

enum {};

bool mlx5_is_roce_on(struct mlx5_core_dev *dev);

static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev)
{}

#ifdef CONFIG_MLX5_MACSEC
static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev)
{}

#define NIC_RDMA_BOTH_DIRS_CAPS

static inline bool mlx5_is_macsec_roce_supported(struct mlx5_core_dev *mdev)
{}
#endif

enum {};

bool mlx5_wc_support_get(struct mlx5_core_dev *mdev);
#endif /* MLX5_DRIVER_H */