// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Amir Hanania <[email protected]>
 *  Haijun Liu <[email protected]>
 *  Moises Veleta <[email protected]>
 *  Ricardo Martinez <[email protected]>
 *  Sreehari Kancharla <[email protected]>
 *
 * Contributors:
 *  Andy Shevchenko <[email protected]>
 *  Chiranjeevi Rapolu <[email protected]>
 *  Eliot Lee <[email protected]>
 */

#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/dma-direction.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/iopoll.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "t7xx_cldma.h"
#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

/* Maximum number of TX/RX GPDs processed per polling pass */
#define MAX_TX_BUDGET			16
#define MAX_RX_BUDGET			16

/* Polling window and step when waiting for the HW queues to stop */
#define CHECK_Q_STOP_TIMEOUT_US		1000000
#define CHECK_Q_STOP_STEP_US		10000

static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
				     enum mtk_txrx tx_rx, unsigned int index)
{}

static void md_cd_queue_struct_init(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
				    enum mtk_txrx tx_rx, unsigned int index)
{}

static void t7xx_cldma_gpd_set_data_ptr(struct cldma_gpd *gpd, dma_addr_t data_ptr)
{}

static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_ptr)
{}

static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
					size_t size, gfp_t gfp_mask)
{}

static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool *over_budget)
{}

static int t7xx_cldma_gpd_rx_collect(struct cldma_queue *queue, int budget)
{}

static void t7xx_cldma_rx_done(struct work_struct *work)
{}

static int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue)
{}

static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue)
{}

static void t7xx_cldma_tx_done(struct work_struct *work)
{}

static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl,
				 struct cldma_ring *ring, enum dma_data_direction tx_rx)
{}

static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, size_t pkt_size)
{}

static int t7xx_cldma_rx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
{}

static struct cldma_request *t7xx_alloc_tx_request(struct cldma_ctrl *md_ctrl)
{}

static int t7xx_cldma_tx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
{}

/**
 * t7xx_cldma_q_reset() - Reset CLDMA request pointers to their initial values.
 * @queue: Pointer to the queue structure.
 *
 * Called with ring_lock held (unless called during the initialization phase).
 */
static void t7xx_cldma_q_reset(struct cldma_queue *queue)
{}
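
/*
 * Illustrative wrapper for the locking rule documented above: outside of the
 * initialization path, ring_lock is expected to be held around the reset.
 * That ring_lock is a spinlock member of struct cldma_queue (defined in
 * t7xx_hif_cldma.h) is an assumption here, as is the irqsave variant.
 */
static void __maybe_unused t7xx_cldma_q_reset_locked(struct cldma_queue *queue)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->ring_lock, flags);
	t7xx_cldma_q_reset(queue);
	spin_unlock_irqrestore(&queue->ring_lock, flags);
}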

static void t7xx_cldma_rxq_init(struct cldma_queue *queue)
{}

static void t7xx_cldma_txq_init(struct cldma_queue *queue)
{}

static void t7xx_cldma_enable_irq(struct cldma_ctrl *md_ctrl)
{}

static void t7xx_cldma_disable_irq(struct cldma_ctrl *md_ctrl)
{}

static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl)
{}

static bool t7xx_cldma_qs_are_active(struct cldma_ctrl *md_ctrl)
{}

/**
 * t7xx_cldma_stop() - Stop CLDMA.
 * @md_ctrl: CLDMA context structure.
 *
 * Stop TX and RX queues. Disable L1 and L2 interrupts.
 * Clear status registers.
 *
 * Return:
 * * 0		- Success.
 * * -ERROR	- Error code from polling cldma_queues_active.
 */
int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl)
{}
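
/*
 * Illustrative caller of t7xx_cldma_stop(): stop the interface and report if
 * polling for inactive queues timed out. The surrounding context (when this
 * would run and how the error is handled) is an assumption, not taken from
 * this driver.
 */
static void __maybe_unused t7xx_cldma_stop_example(struct cldma_ctrl *md_ctrl)
{
	int ret;

	ret = t7xx_cldma_stop(md_ctrl);
	if (ret)
		pr_warn("CLDMA queues did not report inactive: %d\n", ret);
}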

static void t7xx_cldma_late_release(struct cldma_ctrl *md_ctrl)
{}

void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl)
{}

/**
 * t7xx_cldma_start() - Start CLDMA.
 * @md_ctrl: CLDMA context structure.
 *
 * Set TX/RX start address.
 * Start all RX queues and enable L2 interrupt.
 */
void t7xx_cldma_start(struct cldma_ctrl *md_ctrl)
{}
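
/*
 * Illustrative restart sequence built only from entry points declared in
 * t7xx_hif_cldma.h: stop and reset the interface, reprogram the HW, then
 * start it again. The ordering is an assumption about how these calls
 * compose, not a sequence documented in this file.
 */
static int __maybe_unused t7xx_cldma_restart_example(struct cldma_ctrl *md_ctrl)
{
	int ret;

	ret = t7xx_cldma_stop(md_ctrl);
	if (ret)
		return ret;

	t7xx_cldma_reset(md_ctrl);
	t7xx_cldma_hif_hw_init(md_ctrl);
	t7xx_cldma_start(md_ctrl);

	return 0;
}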

static void t7xx_cldma_clear_txq(struct cldma_ctrl *md_ctrl, int qnum)
{}

static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum)
{}

void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
{}

void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
{}

static int t7xx_cldma_gpd_handle_tx_request(struct cldma_queue *queue, struct cldma_request *tx_req,
					    struct sk_buff *skb)
{}

/* Called with cldma_lock */
static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
				     struct cldma_request *prev_req)
{}

/**
 * t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets.
 * @queue: CLDMA queue.
 * @recv_skb: Receiving skb callback.
 */
void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
			     int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
{}
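
/*
 * Illustrative use of t7xx_cldma_set_recv_skb(): a trivial RX callback that
 * consumes every skb, and its registration on a queue. Returning 0 to mean
 * "skb consumed" is an assumption about the callback contract rather than
 * something documented above.
 */
static int t7xx_cldma_recv_skb_drop_example(struct cldma_queue *queue, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return 0;
}

static void __maybe_unused t7xx_cldma_hook_queue_example(struct cldma_queue *queue)
{
	t7xx_cldma_set_recv_skb(queue, t7xx_cldma_recv_skb_drop_example);
}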

/**
 * t7xx_cldma_send_skb() - Send control data to the modem.
 * @md_ctrl: CLDMA context structure.
 * @qno: Queue number.
 * @skb: Socket buffer.
 *
 * Return:
 * * 0		- Success.
 * * -ENOMEM	- Allocation failure.
 * * -EINVAL	- Invalid queue request.
 * * -EIO	- Queue is not active.
 * * -ETIMEDOUT	- Timeout waiting for the device to wake up.
 */
int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb)
{}
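
/*
 * Illustrative caller of t7xx_cldma_send_skb(): build an skb around a control
 * message and send it on queue 0. The queue number and the assumption that
 * the caller still owns (and must free) the skb when the call fails are both
 * illustrative choices, not rules documented above.
 */
static int __maybe_unused t7xx_cldma_send_example(struct cldma_ctrl *md_ctrl,
						  const void *data, size_t len)
{
	struct sk_buff *skb;
	int ret;

	skb = __dev_alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	skb_put_data(skb, data, len);

	ret = t7xx_cldma_send_skb(md_ctrl, 0, skb);
	if (ret)
		dev_kfree_skb(skb);

	return ret;
}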

static void t7xx_cldma_adjust_config(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
{}

static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
{}

static void __iomem *t7xx_pcie_addr_transfer(void __iomem *addr, u32 addr_trs1, u32 phy_addr)
{}

static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl)
{}

static int t7xx_cldma_default_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
{}

int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
{}

static void t7xx_cldma_resume_early(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{}

static int t7xx_cldma_resume(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{}

static void t7xx_cldma_suspend_late(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{}

static int t7xx_cldma_suspend(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{}

static int t7xx_cldma_pm_init(struct cldma_ctrl *md_ctrl)
{}

static int t7xx_cldma_pm_uninit(struct cldma_ctrl *md_ctrl)
{}

void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl)
{}

static irqreturn_t t7xx_cldma_isr_handler(int irq, void *data)
{}

static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl)
{}

/**
 * t7xx_cldma_init() - Initialize CLDMA.
 * @md_ctrl: CLDMA context structure.
 *
 * Allocate and initialize device power management entity.
 * Initialize HIF TX/RX queue structure.
 * Register CLDMA callback ISR with PCIe driver.
 *
 * Return:
 * * 0		- Success.
 * * -ERROR	- Error code from a failed sub-initialization.
 */
int t7xx_cldma_init(struct cldma_ctrl *md_ctrl)
{}
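
/*
 * Illustrative bring-up order using the entry points declared in
 * t7xx_hif_cldma.h: initialize the HIF context, program the hardware, then
 * start the queues; t7xx_cldma_exit() is the teardown counterpart. Whether
 * the real probe path performs exactly these steps in this order is an
 * assumption.
 */
static int __maybe_unused t7xx_cldma_bringup_example(struct cldma_ctrl *md_ctrl)
{
	int ret;

	ret = t7xx_cldma_init(md_ctrl);
	if (ret)
		return ret;

	t7xx_cldma_hif_hw_init(md_ctrl);
	t7xx_cldma_start(md_ctrl);

	return 0;
}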

void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
{}

void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl)
{}