#ifndef _QED_SRIOV_H
#define _QED_SRIOV_H
#include <linux/types.h>
#include "qed_vf.h"
#define QED_ETH_VF_NUM_MAC_FILTERS …
#define QED_ETH_VF_NUM_VLAN_FILTERS …
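
/* Number of u64 words needed to hold a MAX_NUM_VFS-bit bitmap (assumed; the
 * driver uses this to size its per-VF pending-event/FLR bitmaps).
 */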
#define QED_VF_ARRAY_LENGTH …
#ifdef CONFIG_QED_SRIOV
#define IS_VF(cdev) …
#define IS_PF(cdev) …
#define IS_PF_SRIOV(p_hwfn) …
#else
#define IS_VF(cdev) …
#define IS_PF(cdev) …
#define IS_PF_SRIOV(p_hwfn) …
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn) …
#define QED_MAX_VF_CHAINS_PER_PF …
#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS …
enum qed_iov_vport_update_flag { … };
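
/* VF attributes exposed to the PF's management layer; the forced MAC/VLAN
 * copies kept here are later reflected in the VF's bulletin board.
 */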
struct qed_public_vf_info { … };
struct qed_iov_vf_init_params { … };
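
/* Part of qed_dev and relevant to all hwfns; initialized only if the SR-IOV
 * capability is exposed in the PCIe configuration space.
 */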
struct qed_hw_sriov_info { … };
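
/* Maintained per VF in its PF; contains all information required for
 * sending and receiving a PF<->VF mailbox message.
 */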
struct qed_iov_vf_mbx { … };
#define QED_IOV_LEGACY_QID_RX …
#define QED_IOV_LEGACY_QID_TX …
#define QED_IOV_QID_INVALID …
struct qed_vf_queue_cid { … };
struct qed_vf_queue { … };
enum vf_state { … };
struct qed_vf_vlan_shadow { … };
struct qed_vf_shadow_config { … };
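
/* PFs maintain an array of this structure, one entry per VF */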
struct qed_vf_info { … };
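
/* Part of qed_hwfn; used only by PFs that have the SR-IOV capability enabled */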
struct qed_pf_iov { … };
enum qed_iov_wq_flag { … };
extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#ifdef CONFIG_QED_SRIOV
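
/**
 * qed_iov_is_valid_vfid(): Check whether @rel_vf_id is a valid VF index
 *                          with respect to the requested filters.
 *
 * @p_hwfn: HW device data.
 * @rel_vf_id: Relative VF ID.
 * @b_enabled_only: Consider only enabled VFs.
 * @b_non_malicious: Reject VFs marked as malicious.
 *
 * Return: true for a valid VF ID.
 */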
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
                           int rel_vf_id,
                           bool b_enabled_only, bool b_non_malicious);
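
/**
 * qed_iov_get_next_active_vf(): Given a VF index, return the index of the
 *                               next (including that) active VF.
 *
 * @p_hwfn: HW device data.
 * @rel_vf_id: Relative VF ID to start the search from.
 *
 * Return: MAX_NUM_VFS if there are no further active VFs, otherwise the index.
 */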
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
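
/* Publish the VXLAN/GENEVE tunnel UDP ports to @vfid via its bulletin board */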
void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
                                    int vfid, u16 vxlan_port, u16 geneve_port);
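
/**
 * qed_iov_hw_info(): Read SR-IOV related information and allocated resources
 *                    from the PCIe configuration space, shmem, etc.
 *
 * @p_hwfn: HW device data.
 *
 * Return: 0 on success, negative value otherwise.
 */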
int qed_iov_hw_info(struct qed_hwfn *p_hwfn);
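
/**
 * qed_add_tlv(): Place a given TLV on the TLV buffer at the next offset and
 *                advance @offset past it.
 *
 * @p_hwfn: HW device data.
 * @offset: In/out pointer to the current write position in the buffer.
 * @type: TLV type.
 * @length: TLV length.
 *
 * Return: Pointer to the newly placed TLV.
 */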
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
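
/**
 * qed_dp_tlv_list(): Debug-print the types and lengths of the TLVs in the
 *                    given buffer.
 *
 * @p_hwfn: HW device data.
 * @tlvs_list: Pointer to the TLV list buffer.
 */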
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
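
/**
 * qed_sriov_vfpf_malicious(): Handle a FW notification that a VF was
 *                             detected as malicious.
 *
 * @p_hwfn: HW device data.
 * @p_data: FW-provided error data identifying the VF.
 */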
void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
                              struct fw_err_data *p_data);
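
/**
 * qed_sriov_eqe_event(): Callback for processing SR-IOV events arriving on
 *                        the event queue, such as VF-PF channel messages.
 *
 * @p_hwfn: HW device data.
 * @opcode: Event opcode.
 * @echo: Echo value carried by the event.
 * @data: Event ring data.
 * @fw_return_code: FW return code.
 *
 * Return: 0 on success, negative value otherwise.
 */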
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
                        union event_ring_data *data, u8 fw_return_code);
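
/* Allocate, initialize and free the PF's SR-IOV bookkeeping, respectively */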
int qed_iov_alloc(struct qed_hwfn *p_hwfn);
void qed_iov_setup(struct qed_hwfn *p_hwfn);
void qed_iov_free(struct qed_hwfn *p_hwfn);
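
/**
 * qed_iov_free_hw_info(): Free SR-IOV related memory that was allocated
 *                         during hw_prepare.
 *
 * @cdev: Qed dev pointer.
 */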
void qed_iov_free_hw_info(struct qed_dev *cdev);
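
/**
 * qed_iov_mark_vf_flr(): Mark the structs of VFs that have been FLR-ed.
 *
 * @p_hwfn: HW device data.
 * @disabled_vfs: Bitmask of all VFs on the path that were FLR-ed.
 *
 * Return: true iff one of the PF's VFs got FLR-ed, false otherwise.
 */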
bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
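
/**
 * qed_iov_search_list_tlvs(): Search extended TLVs in a request/reply buffer.
 *
 * @p_hwfn: HW device data.
 * @p_tlvs_list: Pointer to the TLV list.
 * @req_type: Requested TLV type.
 *
 * Return: Pointer to the TLV if found, otherwise NULL.
 */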
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
                               void *p_tlvs_list, u16 req_type);
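
/* Start/stop the IOV workqueue and schedule deferred IOV work on it */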
void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
int qed_iov_wq_start(struct qed_dev *cdev);
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
void qed_vf_start_iov_wq(struct qed_dev *cdev);
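
/* Disable SR-IOV on @cdev; @pci_enabled indicates whether VFs are currently
 * enabled at the PCI level.
 */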
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
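
/* Propagate the PF's current link state to its VFs via their bulletin boards */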
void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
#else
static inline bool
qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
                      int rel_vf_id, bool b_enabled_only, bool b_non_malicious)
{
        return false;
}

static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
                                             u16 rel_vf_id)
{
        return MAX_NUM_VFS;
}

static inline void
qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid,
                               u16 vxlan_port, u16 geneve_port)
{
}

static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
        return 0;
}

static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
        return 0;
}

static inline void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
}

static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
{
}

static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
{
}

static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
                                       u32 *disabled_vfs)
{
        return false;
}

static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
}

static inline int qed_iov_wq_start(struct qed_dev *cdev)
{
        return 0;
}

static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
                                    enum qed_iov_wq_flag flag)
{
}

static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
}

static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
        return 0;
}

static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
}

static inline void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
                                            struct fw_err_data *p_data)
{
}

static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode,
                                      __le16 echo, union event_ring_data *data,
                                      u8 fw_return_code)
{
        return 0;
}
#endif
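
/*
 * Iterate over all active VFs of @_p_hwfn; on each iteration, _i holds the
 * relative VF index. A minimal usage sketch (the pf_iov_info->vfs_array
 * field names are assumptions for illustration):
 *
 *      int i;
 *
 *      qed_for_each_vf(p_hwfn, i) {
 *              struct qed_vf_info *vf = &p_hwfn->pf_iov_info->vfs_array[i];
 *              ...
 *      }
 */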
#define qed_for_each_vf(_p_hwfn, _i) …
#endif