linux/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h

/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#ifndef OTX2_COMMON_H
#define OTX2_COMMON_H

#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/macsec.h>
#include <net/pkt_cls.h>
#include <net/devlink.h>
#include <linux/time64.h>
#include <linux/dim.h>
#include <uapi/linux/if_macsec.h>

#include <mbox.h>
#include <npc.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
#include "otx2_devlink.h"
#include <rvu_trace.h>
#include "qos.h"

/* IPv4 "more fragments" flag bit */
#define IPV4_FLAG_MORE
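
/* Illustrative sketch (not part of the driver API, helper name hypothetical):
 * the "more fragments" bit lives in the IPv4 frag_off field, and the generic
 * kernel constant IP_MF (<net/ip.h>, with struct iphdr from <linux/ip.h>)
 * expresses the same check independent of this driver-local define.
 */
static inline bool otx2_example_ipv4_has_more_frags(const struct iphdr *iph)
{
	/* frag_off is big-endian on the wire; IP_MF is a host-order mask */
	return !!(ntohs(iph->frag_off) & IP_MF);
}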

/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF
#define PCI_DEVID_OCTEONTX2_RVU_VF
#define PCI_DEVID_OCTEONTX2_RVU_AFVF

#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF
#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF

/* PCI BAR numbers */
#define PCI_CFG_REG_BAR_NUM
#define PCI_MBOX_BAR_NUM

#define NAME_SIZE

#ifdef CONFIG_DCB
/* Max priority supported for PFC */
#define NIX_PF_PFC_PRIO_MAX
#endif

enum arua_mapped_qtypes {};

/* NIX LF interrupt vector ranges */
#define NIX_LF_QINT_VEC_START
#define NIX_LF_CINT_VEC_START
#define NIX_LF_GINT_VEC
#define NIX_LF_ERR_VEC
#define NIX_LF_POISON_VEC

/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID
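
/* Purely illustrative arithmetic for the comment above (names hypothetical):
 * keeping SEND_CQ_SKID completion-queue entries as headroom bounds how many
 * send completions may be outstanding before the CQ could overflow.
 */
static inline u32 otx2_example_send_headroom(u32 cq_entries, u32 skid)
{
	return cq_entries > skid ? cq_entries - skid : 0;
}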

#define OTX2_GET_RX_STATS(reg)
#define OTX2_GET_TX_STATS(reg)

struct otx2_lmt_info {};
/* RSS configuration */
struct otx2_rss_ctx {};

struct otx2_rss_info {};

/* NIX (or NPC) RX errors */
enum otx2_errlvl {};

enum otx2_errcodes_re {};

/* NIX TX stats */
enum nix_stat_lf_tx {};

/* NIX RX stats */
enum nix_stat_lf_rx {};

struct otx2_dev_stats {};

/* Driver counted stats */
struct otx2_drv_stats {};

struct mbox {};

/* Egress rate limiting definitions */
#define MAX_BURST_EXPONENT
#define MAX_BURST_MANTISSA
#define MAX_BURST_SIZE
#define MAX_RATE_DIVIDER_EXPONENT
#define MAX_RATE_EXPONENT
#define MAX_RATE_MANTISSA

/* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA
#define TLX_RATE_EXPONENT
#define TLX_RATE_DIVIDER_EXPONENT
#define TLX_BURST_MANTISSA
#define TLX_BURST_EXPONENT
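
/* Sketch only: assuming the TLX_* defines above are GENMASK_ULL()-style field
 * masks, a PIR register value could be assembled from already-computed
 * mantissa/exponent components with FIELD_PREP() from <linux/bitfield.h>.
 * The helper name is hypothetical and the rate-to-mantissa/exponent
 * conversion itself is not shown here.
 */
static inline u64 otx2_example_pack_pir(u64 rate_mant, u64 rate_exp,
					u64 rate_div_exp,
					u64 burst_mant, u64 burst_exp)
{
	return FIELD_PREP(TLX_RATE_MANTISSA, rate_mant) |
	       FIELD_PREP(TLX_RATE_EXPONENT, rate_exp) |
	       FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, rate_div_exp) |
	       FIELD_PREP(TLX_BURST_MANTISSA, burst_mant) |
	       FIELD_PREP(TLX_BURST_EXPONENT, burst_exp);
}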

struct otx2_hw {};

enum vfperm {};

struct otx2_vf_config {};

struct flr_work {};

struct refill_work {};

/* PTPv2 originTimestamp structure */
struct ptpv2_tstamp {} __packed;
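
/* Sketch only: IEEE 1588 encodes originTimestamp as a 48-bit big-endian
 * seconds field followed by a 32-bit big-endian nanoseconds field.  The
 * parameter names below are assumptions for illustration; the real layout
 * lives in struct ptpv2_tstamp above.
 */
static inline u64 otx2_example_ptpv2_to_ns(__be16 sec_msb, __be32 sec_lsb,
					   __be32 nsec)
{
	u64 seconds = ((u64)ntohs(sec_msb) << 32) | ntohl(sec_lsb);

	return seconds * NSEC_PER_SEC + ntohl(nsec);
}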

struct otx2_ptp {};

#define OTX2_HW_TIMESTAMP_LEN

struct otx2_mac_table {};

struct otx2_flow_config {};

struct dev_hw_ops {};

#define CN10K_MCS_SA_PER_SC

/* Stats that must be accumulated in software because the
 * corresponding hardware counters are shared (see the
 * illustrative sketch after these structures).
 */
struct cn10k_txsc_stats {};

struct cn10k_rxsc_stats {};
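
/* Sketch of the accumulation idea promised above (types and names are
 * hypothetical): read the shared hardware counter, fold the delta since the
 * last read into a per-object 64-bit software total, then remember the raw
 * value for the next read.
 */
struct otx2_example_sw_counter {
	u64 total;	/* software-accumulated value */
	u64 last_raw;	/* raw hardware value at the last read */
};

static inline void otx2_example_accumulate(struct otx2_example_sw_counter *c,
					   u64 raw)
{
	c->total += raw - c->last_raw;
	c->last_raw = raw;
}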

struct cn10k_mcs_txsc {};

struct cn10k_mcs_rxsc {};

struct cn10k_mcs_cfg {};

struct otx2_nic {};

static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
{}

static inline bool is_96xx_A0(struct pci_dev *pdev)
{}

static inline bool is_96xx_B0(struct pci_dev *pdev)
{}

/* REVID for PCIe devices.
 * Bits 0..1: minor pass, bits 3..2: major pass,
 * bits 7..4: midr id (decoded as sketched below these definitions).
 */
#define PCI_REVISION_ID_96XX
#define PCI_REVISION_ID_95XX
#define PCI_REVISION_ID_95XXN
#define PCI_REVISION_ID_98XX
#define PCI_REVISION_ID_95XXMM
#define PCI_REVISION_ID_95XXO
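
/* Sketch only: decoding a revision ID with the bit layout documented above
 * (helper names are hypothetical, not driver API).
 */
static inline u8 otx2_example_rev_minor_pass(u8 revid)
{
	return revid & 0x3;		/* bits 1..0 */
}

static inline u8 otx2_example_rev_major_pass(u8 revid)
{
	return (revid >> 2) & 0x3;	/* bits 3..2 */
}

static inline u8 otx2_example_rev_midr(u8 revid)
{
	return (revid >> 4) & 0xf;	/* bits 7..4 */
}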

static inline bool is_dev_otx2(struct pci_dev *pdev)
{}

static inline bool is_dev_cn10kb(struct pci_dev *pdev)
{}

static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{}

/* Register read/write APIs */
static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
{}

static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
{}

static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
{}
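
/* Minimal sketch of the read/write pair above, assuming "reg_base" is the
 * ioremapped register BAR (the field holding it in struct otx2_nic is not
 * shown here, and the real otx2_get_regaddr() may also fold an RVU block
 * offset into "offset").
 */
static inline void otx2_example_write64(void __iomem *reg_base, u64 offset,
					u64 val)
{
	writeq(val, reg_base + offset);
}

static inline u64 otx2_example_read64(void __iomem *reg_base, u64 offset)
{
	return readq(reg_base + offset);
}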

/* Mbox bounce buffer APIs */
static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
{}

static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
{}

/* In the absence of an API for 128-bit I/O memory access on arm64,
 * implement the required operations in place.
 */
#if defined(CONFIG_ARM64)
static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
{
	/* Plain offset addressing: "addr" is an input-only operand, so the
	 * pre-index writeback form ("...,#0]!") must not be used here.
	 */
	__asm__ volatile("stp %x[x0], %x[x1], [%x[p1]]"
			 ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}

static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
{
	u64 result;

	__asm__ volatile(".cpu   generic+lse\n"
			 "ldadd %x[i], %x[r], [%[b]]"
			 : [r]"=r"(result), "+m"(*ptr)
			 : [i]"r"(incr), [b]"r"(ptr)
			 : "memory");
	return result;
}

#else
#define otx2_write128(lo, hi, addr)
#define otx2_atomic64_add(incr, ptr)
#endif
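
/* Usage sketch: otx2_atomic64_add() has fetch-and-add semantics, i.e. it
 * returns the value *ptr held before the addition (helper name hypothetical,
 * illustrative only).
 */
static inline u64 otx2_example_fetch_inc(u64 *counter)
{
	return otx2_atomic64_add(1, counter);
}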

static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
					u64 *ptrs, u64 num_ptrs)
{}

static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
{}

/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{}

/* Free pointer to a pool/aura */
static inline void otx2_aura_freeptr(void *dev, int aura, u64 buf)
{}

static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
{}

/* Mbox APIs */
static inline int otx2_sync_mbox_msg(struct mbox *mbox)
{}

static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
{}

/* Use this API to send mbox msgs in atomic context
 * where sleeping is not allowed
 */
static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
{}
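
/* Typical calling pattern (sketch, names assumed for illustration): a request
 * is allocated with one of the otx2_mbox_alloc_msg_*() helpers generated by
 * the M() macro below, then otx2_sync_mbox_msg() sends it and waits for the
 * AF response:
 *
 *	mutex_lock(&pfvf->mbox.lock);
 *	req = otx2_mbox_alloc_msg_<some_msg>(&pfvf->mbox);
 *	if (!req) {
 *		mutex_unlock(&pfvf->mbox.lock);
 *		return -ENOMEM;
 *	}
 *	... fill *req ...
 *	err = otx2_sync_mbox_msg(&pfvf->mbox);
 *	mutex_unlock(&pfvf->mbox.lock);
 */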

#define M

MBOX_MESSAGES
#undef M

#define M

MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M

/* Time to wait before the TX watchdog kicks in */
#define OTX2_TX_TIMEOUT
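
/* Usage sketch: a timeout like this is normally handed to the core at probe
 * time so the stack knows when to invoke .ndo_tx_timeout() (helper name
 * hypothetical, illustrative only).
 */
static inline void otx2_example_set_tx_watchdog(struct net_device *netdev)
{
	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
}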

#define RVU_PFVF_PF_SHIFT
#define RVU_PFVF_PF_MASK
#define RVU_PFVF_FUNC_SHIFT
#define RVU_PFVF_FUNC_MASK

static inline bool is_otx2_vf(u16 pcifunc)
{}

static inline int rvu_get_pf(u16 pcifunc)
{}
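
/* Sketch only: with the pcifunc layout implied by the RVU_PFVF_* defines
 * above, the PF number and the function (VF) part can be extracted as below.
 * Helper names are hypothetical; the real helpers are declared just above
 * with their bodies elided.
 */
static inline int otx2_example_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}

static inline bool otx2_example_is_vf(u16 pcifunc)
{
	/* a non-zero function part means the pcifunc belongs to a VF */
	return !!(pcifunc & RVU_PFVF_FUNC_MASK);
}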

static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
					   struct page *page,
					   size_t offset, size_t size,
					   enum dma_data_direction dir)
{}

static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
				       dma_addr_t addr, size_t size,
				       enum dma_data_direction dir)
{}

static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
{}

static inline u16 otx2_get_total_tx_queues(struct otx2_nic *pfvf)
{}

static inline u64 otx2_convert_rate(u64 rate)
{}

static inline int otx2_tc_flower_rule_cnt(struct otx2_nic *pfvf)
{}

/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
int otx2_set_mac_address(struct net_device *netdev, void *p);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
void otx2_setup_segmentation(struct otx2_nic *pfvf);
int otx2_reset_mac_stats(struct otx2_nic *pfvf);

/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
int otx2_detach_resources(struct mbox *mbox);
int otx2_config_npa(struct otx2_nic *pfvf);
int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
void otx2_aura_pool_free(struct otx2_nic *pfvf);
void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
void otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
void otx2_free_pending_sqe(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    dma_addr_t *dma);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
		      dma_addr_t *dma);
int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
		   int stack_pages, int numptrs, int buf_size, int type);
int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
		   int pool_id, int numptrs);

/* RSS configuration APIs */
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);

/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp);
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
				  struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
			    struct cgx_stats_rsp *rsp);
void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
				struct cgx_fec_stats_rsp *rsp);
void otx2_set_fec_stats_count(struct otx2_nic *pfvf);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
				struct nix_bp_cfg_rsp *rsp);

/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
		      struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
void otx2vf_set_ethtool_ops(struct net_device *netdev);

int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues);
int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd);
int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr);

/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf);
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count);
void otx2_mcam_flow_del(struct otx2_nic *pf);
int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
int otx2_get_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc, u32 location);
int otx2_get_all_flows(struct otx2_nic *pfvf,
		       struct ethtool_rxnfc *nfc, u32 *rule_locs);
int otx2_add_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc);
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg);
void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
				   netdev_features_t features);
int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    u64 iova, int size);
int otx2_mcam_entry_init(struct otx2_nic *pfvf);

/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data);
void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic);

/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos);
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);

#ifdef CONFIG_DCB
/* DCB support */
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
int otx2_dcbnl_set_ops(struct net_device *dev);
/* PFC support */
int otx2_pfc_txschq_config(struct otx2_nic *pfvf);
int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf);
int otx2_pfc_txschq_update(struct otx2_nic *pfvf);
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf);
#endif

#if IS_ENABLED(CONFIG_MACSEC)
/* MACSEC offload support */
int cn10k_mcs_init(struct otx2_nic *pfvf);
void cn10k_mcs_free(struct otx2_nic *pfvf);
void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event);
#else
static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; }
static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {}
static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
					  struct mcs_intr_info *event)
{}
#endif /* CONFIG_MACSEC */

/* qos support */
static inline void otx2_qos_init(struct otx2_nic *pfvf, int qos_txqs)
{}

static inline void otx2_shutdown_qos(struct otx2_nic *pfvf)
{}

u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
		      struct net_device *sb_dev);
int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid);
void otx2_qos_config_txschq(struct otx2_nic *pfvf);
void otx2_clean_qos_queues(struct otx2_nic *pfvf);
#endif /* OTX2_COMMON_H */