/* linux/drivers/net/ethernet/cavium/thunder/nicvf_queues.h */

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Cavium, Inc.
 */

#ifndef NICVF_QUEUES_H
#define NICVF_QUEUES_H

#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <net/xdp.h>
#include "q_struct.h"

/* Hardware limits: number of queue sets (one per VF) and the number of
 * receive, receive-buffer-descriptor, send and completion queues per QS.
 */
#define MAX_QUEUE_SET			128
#define MAX_RCV_QUEUES_PER_QS		8
#define MAX_RCV_BUF_DESC_RINGS_PER_QS	2
#define MAX_SND_QUEUES_PER_QS		8
#define MAX_CMP_QUEUES_PER_QS		8

/* VF's queue interrupt ranges: CQ IRQs start at 0, SQ IRQs at 8,
 * RBDR IRQs at 16, followed by the mailbox (MISC) and QS error IRQs.
 */
#define NICVF_INTR_ID_CQ		0
#define NICVF_INTR_ID_SQ		8
#define NICVF_INTR_ID_RBDR		16
#define NICVF_INTR_ID_MISC		18
#define NICVF_INTR_ID_QS_ERR		19

/* Iterate over each per-queue IRQ class; 'irq' walks the half-open
 * range [start id of this class, start id of the next class).
 */
#define for_each_cq_irq(irq)	\
	for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++)
#define for_each_sq_irq(irq)	\
	for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++)
#define for_each_rbdr_irq(irq)	\
	for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++)

/* Hardware encoding of RBDR ring size: entries = 8K << encoding */
#define RBDR_SIZE0		0ULL /* 8K entries */
#define RBDR_SIZE1		1ULL /* 16K entries */
#define RBDR_SIZE2		2ULL /* 32K entries */
#define RBDR_SIZE3		3ULL /* 64K entries */
#define RBDR_SIZE4		4ULL /* 128K entries */
#define RBDR_SIZE5		5ULL /* 256K entries */
#define RBDR_SIZE6		6ULL /* 512K entries */

/* Hardware encoding of SQ size: entries = 1K << encoding */
#define SND_QUEUE_SIZE0		0ULL /* 1K entries */
#define SND_QUEUE_SIZE1		1ULL /* 2K entries */
#define SND_QUEUE_SIZE2		2ULL /* 4K entries */
#define SND_QUEUE_SIZE3		3ULL /* 8K entries */
#define SND_QUEUE_SIZE4		4ULL /* 16K entries */
#define SND_QUEUE_SIZE5		5ULL /* 32K entries */
#define SND_QUEUE_SIZE6		6ULL /* 64K entries */

/* Hardware encoding of CQ size: entries = 1K << encoding */
#define CMP_QUEUE_SIZE0		0ULL /* 1K entries */
#define CMP_QUEUE_SIZE1		1ULL /* 2K entries */
#define CMP_QUEUE_SIZE2		2ULL /* 4K entries */
#define CMP_QUEUE_SIZE3		3ULL /* 8K entries */
#define CMP_QUEUE_SIZE4		4ULL /* 16K entries */
#define CMP_QUEUE_SIZE5		5ULL /* 32K entries */
#define CMP_QUEUE_SIZE6		6ULL /* 64K entries */

/* Default queue count per QS, its lengths and threshold values */
#define DEFAULT_RBDR_CNT	1

/* Default SQ size encoding and derived lengths (entries = 1K << encoding).
 * NOTE(review): default encoding assumed SND_QUEUE_SIZE0 (1K) — confirm
 * against the driver version in use; older trees used a larger default.
 */
#define SND_QSIZE		SND_QUEUE_SIZE0
#define SND_QUEUE_LEN		(1ULL << (SND_QSIZE + 10))
#define MIN_SND_QUEUE_LEN	(1ULL << (SND_QUEUE_SIZE0 + 10))
#define MAX_SND_QUEUE_LEN	(1ULL << (SND_QUEUE_SIZE6 + 10))
#define SND_QUEUE_THRESH	2ULL
#define MIN_SQ_DESC_PER_PKT_XMIT	2
/* Since timestamp not enabled, otherwise 2 */
#define MAX_CQE_PER_PKT_XMIT		1

/* Keep CQ and SQ sizes same, if timestamping
 * is enabled this equation will change.
 */
/* CQ sized to match the SQ (entries = 1K << encoding); derived min/max
 * lengths and the interrupt coalescing thresholds.
 */
#define CMP_QSIZE		CMP_QUEUE_SIZE0
#define CMP_QUEUE_LEN		(1ULL << (CMP_QSIZE + 10))
#define MIN_CMP_QUEUE_LEN	(1ULL << (CMP_QUEUE_SIZE0 + 10))
#define MAX_CMP_QUEUE_LEN	(1ULL << (CMP_QUEUE_SIZE6 + 10))
#define CMP_QUEUE_CQE_THRESH	(NAPI_POLL_WEIGHT / 2)
#define CMP_QUEUE_TIMER_THRESH	80 /* ~2usec */

/* No of CQEs that might anyway gets used by HW due to pipelining
 * effects irrespective of PASS/DROP/LEVELS being configured
 */
#define CMP_QUEUE_PIPELINE_RSVD 544

/* Default RBDR size encoding (buffers = 8K << encoding), refill threshold,
 * per-buffer DMA length and the total per-fragment skb allocation size.
 */
#define RBDR_SIZE		RBDR_SIZE0
#define RCV_BUF_COUNT		(1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT	(1ULL << (RBDR_SIZE6 + 13))
#define RBDR_THRESH		(RCV_BUF_COUNT / 2)
#define DMA_BUFFER_LEN		1536 /* In multiples of 128bytes */
#define RCV_FRAG_LEN	 (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* Worst-case number of TX CQEs: one CQE per transmitted packet, with at
 * least MIN_SQ_DESC_PER_PKT_XMIT SQ descriptors consumed per packet.
 */
#define MAX_CQES_FOR_TX		((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
				 MAX_CQE_PER_PKT_XMIT)

/* RED and Backpressure levels of CQ for pkt reception
 * For CQ, level is a measure of emptiness i.e 0x0 means full
 * eg: For CQ of size 4K, and for pass/drop levels of 160/144
 * HW accepts pkt if unused CQE >= 2560
 * RED accepts pkt if unused CQE < 2560 & >= 2304
 * DROPs pkts if unused CQE < 2304
 */
/* Levels are in units of 256 CQEs (left-shifted by 8 by hardware).
 * NOTE(review): exact pass/drop values are tuning parameters — confirm
 * against the deployed driver tree before changing.
 */
#define RQ_PASS_CQ_LVL         224ULL
#define RQ_DROP_CQ_LVL         216ULL

/* RED and Backpressure levels of RBDR for pkt reception
 * For RBDR, level is a measure of fullness i.e 0x0 means empty
 * eg: For RBDR of size 8K, and for pass/drop levels of 4/0
 * HW accepts pkt if unused RBs >= 256
 * RED accepts pkt if unused RBs < 256 & >= 0
 * DROPs pkts if unused RBs < 0
 */
/* Levels are in units of 64 RBs (left-shifted by 6 by hardware);
 * drop level 0 means packets are dropped only when RBDR is empty.
 */
#define RQ_PASS_RBDR_LVL	8ULL
#define RQ_DROP_RBDR_LVL	0ULL

/* Descriptor size in bytes */
#define SND_QUEUE_DESC_SIZE	16  /* 128 bits */
#define CMP_QUEUE_DESC_SIZE	512

/* Buffer / descriptor alignments */
#define NICVF_RCV_BUF_ALIGN		7   /* receive buffers: 128-byte aligned */
#define NICVF_RCV_BUF_ALIGN_BYTES	(1ULL << NICVF_RCV_BUF_ALIGN)
#define NICVF_CQ_BASE_ALIGN_BYTES	512 /* 9 bits */
#define NICVF_SQ_BASE_ALIGN_BYTES	128 /* 7 bits */

/* Round ADDR up to the next ALIGN_BYTES boundary (kernel ALIGN helper) */
#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES)	ALIGN(ADDR, ALIGN_BYTES)

/* Queue enable/disable: bit positions in the per-queue CFG registers.
 * NOTE(review): bit numbers taken from the ThunderX VNIC register layout —
 * verify against the hardware reference manual before modifying.
 */
#define NICVF_SQ_EN		BIT_ULL(19)

/* Queue reset */
#define NICVF_CQ_RESET		BIT_ULL(41)
#define NICVF_SQ_RESET		BIT_ULL(17)
#define NICVF_RBDR_RESET	BIT_ULL(43)

/* Protocol layer at which an RX error was detected (CQE_RX err_level) */
enum CQ_RX_ERRLVL_E {
	CQ_ERRLVL_MAC,
	CQ_ERRLVL_L2,
	CQ_ERRLVL_L3,
	CQ_ERRLVL_L4,
};

/* RX error opcodes reported in CQE_RX (err_opcode field), grouped by
 * layer: 0x0x receive engine, 0x2x L2, 0x4x L3, 0x6x L4.
 */
enum CQ_RX_ERROP_E {
	CQ_RX_ERROP_RE_NONE = 0x0,
	CQ_RX_ERROP_RE_PARTIAL = 0x1,
	CQ_RX_ERROP_RE_JABBER = 0x2,
	CQ_RX_ERROP_RE_FCS = 0x7,
	CQ_RX_ERROP_RE_TERMINATE = 0x9,
	CQ_RX_ERROP_RE_RX_CTL = 0xb,
	CQ_RX_ERROP_PREL2_ERR = 0x1f,
	CQ_RX_ERROP_L2_FRAGMENT = 0x20,
	CQ_RX_ERROP_L2_OVERRUN = 0x21,
	CQ_RX_ERROP_L2_PFCS = 0x22,
	CQ_RX_ERROP_L2_PUNY = 0x23,
	CQ_RX_ERROP_L2_MAL = 0x24,
	CQ_RX_ERROP_L2_OVERSIZE = 0x25,
	CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
	CQ_RX_ERROP_L2_LENMISM = 0x27,
	CQ_RX_ERROP_L2_PCLP = 0x28,
	CQ_RX_ERROP_IP_NOT = 0x41,
	CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
	CQ_RX_ERROP_IP_MAL = 0x43,
	CQ_RX_ERROP_IP_MALD = 0x44,
	CQ_RX_ERROP_IP_HOP = 0x45,
	CQ_RX_ERROP_L3_ICRC = 0x46,
	CQ_RX_ERROP_L3_PCLP = 0x47,
	CQ_RX_ERROP_L4_MAL = 0x61,
	CQ_RX_ERROP_L4_CHK = 0x62,
	CQ_RX_ERROP_UDP_LEN = 0x63,
	CQ_RX_ERROP_L4_PORT = 0x64,
	CQ_RX_ERROP_TCP_FLAG = 0x65,
	CQ_RX_ERROP_TCP_OFFSET = 0x66,
	CQ_RX_ERROP_L4_PCLP = 0x67,
	CQ_RX_ERROP_RBDR_TRUNC = 0x70,
};

/* TX error opcodes reported in CQE_SEND (cqe_send_t status field) */
enum CQ_TX_ERROP_E {
	CQ_TX_ERROP_GOOD = 0x0,
	CQ_TX_ERROP_DESC_FAULT = 0x10,
	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
	CQ_TX_ERROP_SUBDC_ERR = 0x12,
	CQ_TX_ERROP_MAX_SIZE_VIOL = 0x13,
	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
	CQ_TX_ERROP_LOCK_VIOL = 0x83,
	CQ_TX_ERROP_DATA_FAULT = 0x84,
	CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
	CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
	CQ_TX_ERROP_MEM_FAULT = 0x87,
	CQ_TX_ERROP_CK_OVERLAP = 0x88,
	CQ_TX_ERROP_CK_OFLOW = 0x89,
	CQ_TX_ERROP_ENUM_LAST = 0x8a,
};

/* Indices of the hardware octet/packet counters for RQ and SQ stats */
enum RQ_SQ_STATS {
	RQ_SQ_STATS_OCTS,
	RQ_SQ_STATS_PKTS,
};

struct rx_tx_queue_stats {} ____cacheline_aligned_in_smp;

struct q_desc_mem {};

struct pgcache {};

struct rbdr {} ____cacheline_aligned_in_smp;

struct rcv_queue {} ____cacheline_aligned_in_smp;

struct cmp_queue {} ____cacheline_aligned_in_smp;

struct snd_queue {} ____cacheline_aligned_in_smp;

struct queue_set {} ____cacheline_aligned_in_smp;

/* Return a pointer to descriptor 'idx' of a ring, cast to the descriptor
 * type for that ring kind (types come from q_struct.h).
 */
#define GET_RBDR_DESC(RING, idx)\
		(&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
#define GET_SQ_DESC(RING, idx)\
		(&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
#define GET_CQ_DESC(RING, idx)\
		(&(((union cq_desc_t *)((RING)->desc))[idx]))

/* CQ status bits (CQ_STATUS register): error flags and pending CQE count */
#define CQ_WR_FULL	BIT(26)
#define CQ_WR_DISABLE	BIT(25)
#define CQ_WR_FAULT	BIT(24)
#define CQ_CQE_COUNT	(0xFFFF << 0)

#define CQ_ERR_MASK	(CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)

/* Translate an IOVA handed out by the DMA API back to a physical address.
 * When no IOMMU domain is attached, DMA addresses are already physical.
 */
static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
{
	/* Translation is installed only when IOMMU is present */
	if (nic->iommu_domain)
		return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
	return dma_addr;
}

/* Queue setup/teardown (implemented in nicvf_queues.c) */
void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
			      int hdr_sqe, u8 subdesc_cnt);
void nicvf_config_vlan_stripping(struct nicvf *nic,
				 netdev_features_t features);
int nicvf_set_qset_resources(struct nicvf *nic);
int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
void nicvf_qset_config(struct nicvf *nic, bool enable);
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable);

/* Send queue control and transmit paths (skb and XDP) */
void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
void nicvf_sq_disable(struct nicvf *nic, int qidx);
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
void nicvf_sq_free_used_descs(struct net_device *netdev,
			      struct snd_queue *sq, int qidx);
int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
			struct sk_buff *skb, u8 sq_num);
int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
			    u64 bufaddr, u64 dma_addr, u16 len);
void nicvf_xdp_sq_doorbell(struct nicvf *nic, struct snd_queue *sq, int sq_num);

/* Receive path and RBDR refill (tasklet plus deferred work) */
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
				  struct cqe_rx_t *cqe_rx, bool xdp);
void nicvf_rbdr_task(struct tasklet_struct *t);
void nicvf_rbdr_work(struct work_struct *work);

/* Per-queue interrupt enable/disable/clear helpers */
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);

/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64  nicvf_reg_read(struct nicvf *nic, u64 offset);
void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val);
u64  nicvf_queue_reg_read(struct nicvf *nic,
			  u64 offset, u64 qidx);

/* Stats */
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx);
#endif /* NICVF_QUEUES_H */