linux/drivers/thunderbolt/nhi.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Thunderbolt driver - NHI driver
 *
 * The NHI (native host interface) is the PCI device that allows us to send and
 * receive frames from the Thunderbolt bus.
 *
 * Copyright (c) 2014 Andreas Noever <[email protected]>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/property.h>
#include <linux/string_helpers.h>

#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"

#define RING_TYPE(ring)		((ring)->is_tx ? "TX ring" : "RX ring")

#define RING_FIRST_USABLE_HOPID	1
/*
 * Used with QUIRK_E2E to specify an unused HopID to which the Rx credits
 * are transferred.
 */
#define RING_E2E_RESERVED_HOPID	RING_FIRST_USABLE_HOPID
/*
 * Minimal number of vectors when we use MSI-X. Two are for the control
 * channel Rx/Tx and the remaining four are for cross-domain DMA paths.
 */
#define MSIX_MIN_VECS		6
#define MSIX_MAX_VECS		16

#define NHI_MAILBOX_TIMEOUT	500 /* ms */

/* Host interface quirks */
#define QUIRK_AUTO_CLEAR_INT	BIT(0)
#define QUIRK_E2E		BIT(1)

static bool host_reset = true;
module_param(host_reset, bool, 0444);
MODULE_PARM_DESC(host_reset, "reset USBv2 host router (default: true)");

static int ring_interrupt_index(const struct tb_ring *ring)
{}
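/*
 * A minimal sketch of what ring_interrupt_index() computes (illustrative,
 * based on the NHI register layout where the Tx ring interrupt bits come
 * first and the Rx bits follow after @nhi->hop_count):
 *
 *	int bit = ring->hop;
 *
 *	if (!ring->is_tx)
 *		bit += ring->nhi->hop_count;
 *	return bit;
 */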

static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring)
{}

static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring)
{}

/*
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 *
 * ring->nhi->lock must be held.
 */
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{}
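/*
 * Illustrative sketch of the enable/disable sequence (assumes the
 * REG_RING_INTERRUPT_BASE layout from nhi_regs.h; the real code also has
 * to deal with QUIRK_AUTO_CLEAR_INT):
 *
 *	int reg = REG_RING_INTERRUPT_BASE +
 *		  ring_interrupt_index(ring) / 32 * 4;
 *	int mask = BIT(ring_interrupt_index(ring) & 31);
 *	u32 val;
 *
 *	val = ioread32(ring->nhi->iobase + reg);
 *	if (active)
 *		val |= mask;
 *	else
 *		val &= ~mask;
 *	iowrite32(val, ring->nhi->iobase + reg);
 */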

/*
 * nhi_disable_interrupts() - disable interrupts for all rings
 *
 * Use only during init and shutdown.
 */
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{}

/* ring helper methods */

static void __iomem *ring_desc_base(struct tb_ring *ring)
{}

static void __iomem *ring_options_base(struct tb_ring *ring)
{}

static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
{}

static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
{}

static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{}

static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{}

static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{}

static bool ring_full(struct tb_ring *ring)
{}

static bool ring_empty(struct tb_ring *ring)
{}
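/*
 * Both predicates treat ring->head (producer) and ring->tail (consumer) as
 * indices into a classic circular buffer where one slot is kept unused so
 * that the full and empty states stay distinguishable. A sketch of the
 * likely implementation:
 *
 *	ring_full:	return ((ring->head + 1) % ring->size) == ring->tail;
 *	ring_empty:	return ring->head == ring->tail;
 */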

/*
 * ring_write_descriptors() - post frames from ring->queue to the controller
 *
 * ring->lock is held.
 */
static void ring_write_descriptors(struct tb_ring *ring)
{}
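/*
 * Sketch of the posting loop (illustrative; assumes the ring_desc layout
 * from nhi.h): frames move from ring->queue to ring->in_flight as their
 * descriptors are filled in, and the producer index is published to the
 * hardware last.
 *
 *	struct ring_frame *frame, *n;
 *
 *	list_for_each_entry_safe(frame, n, &ring->queue, list) {
 *		struct ring_desc *desc = &ring->descriptors[ring->head];
 *
 *		if (ring_full(ring))
 *			break;
 *		list_move_tail(&frame->list, &ring->in_flight);
 *		desc->phys = frame->buffer_phy;
 *		desc->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
 *		if (ring->is_tx) {
 *			desc->length = frame->size;
 *			desc->sof = frame->sof;
 *			desc->eof = frame->eof;
 *		}
 *		ring->head = (ring->head + 1) % ring->size;
 *		if (ring->is_tx)
 *			ring_iowrite_prod(ring, ring->head);
 *		else
 *			ring_iowrite_cons(ring, ring->head);
 *	}
 */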

/*
 * ring_work() - progress completed frames
 *
 * If the ring is shutting down then all frames are marked as canceled and
 * their callbacks are invoked.
 *
 * Otherwise we collect all completed frames from the ring buffer, post new
 * frames to the ring buffer and invoke the callbacks for the completed frames.
 */
static void ring_work(struct work_struct *work)
{}
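/*
 * Condensed sketch of the completion path (illustrative; the Rx metadata
 * copy is omitted): completed frames are collected on a local list under
 * ring->lock, the lock is dropped, and only then are the callbacks run so
 * that they are free to enqueue new frames.
 *
 *	LIST_HEAD(done);
 *
 *	spin_lock_irqsave(&ring->lock, flags);
 *	if (!ring->running) {
 *		list_splice_tail_init(&ring->in_flight, &done);
 *		list_splice_tail_init(&ring->queue, &done);
 *		canceled = true;
 *	} else {
 *		while (!ring_empty(ring) &&
 *		       (ring->descriptors[ring->tail].flags &
 *			RING_DESC_COMPLETED)) {
 *			frame = list_first_entry(&ring->in_flight,
 *						 typeof(*frame), list);
 *			list_move_tail(&frame->list, &done);
 *			ring->tail = (ring->tail + 1) % ring->size;
 *		}
 *		ring_write_descriptors(ring);
 *	}
 *	spin_unlock_irqrestore(&ring->lock, flags);
 *
 *	list_for_each_entry_safe(frame, n, &done, list) {
 *		list_del_init(&frame->list);
 *		if (frame->callback)
 *			frame->callback(ring, frame, canceled);
 *	}
 */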

int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{}
EXPORT_SYMBOL_GPL(__tb_ring_enqueue);

/**
 * tb_ring_poll() - Poll one completed frame from the ring
 * @ring: Ring to poll
 *
 * This function can be called once the @start_poll callback of the @ring
 * has been called. It reads one completed frame from the ring and returns
 * it to the caller. Returns %NULL if there are no more completed frames.
 */
struct ring_frame *tb_ring_poll(struct tb_ring *ring)
{}
EXPORT_SYMBOL_GPL(tb_ring_poll);

static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
{}

/* Both @nhi->lock and @ring->lock should be held */
static void __ring_interrupt(struct tb_ring *ring)
{}

/**
 * tb_ring_poll_complete() - Re-start interrupt for the ring
 * @ring: Ring to re-start the interrupt
 *
 * This will re-start (unmask) the ring interrupt once the user is done
 * with polling.
 */
void tb_ring_poll_complete(struct tb_ring *ring)
{}
EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
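/*
 * Example of the polling pattern (illustrative; my_ring, my_poll_work and
 * handle_frame are hypothetical client code). When @start_poll is set the
 * ring interrupt stays masked after it fires, and the client drains the
 * ring from its own context before unmasking:
 *
 *	static void my_poll_work(struct work_struct *work)
 *	{
 *		struct ring_frame *frame;
 *
 *		while ((frame = tb_ring_poll(my_ring)))
 *			handle_frame(frame);
 *		tb_ring_poll_complete(my_ring);
 *	}
 */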

static void ring_clear_msix(const struct tb_ring *ring)
{}

static irqreturn_t ring_msix(int irq, void *data)
{}

static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
{}

static void ring_release_msix(struct tb_ring *ring)
{}

static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
{}

static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
				     bool transmit, unsigned int flags,
				     int e2e_tx_hop, u16 sof_mask, u16 eof_mask,
				     void (*start_poll)(void *),
				     void *poll_data)
{}

/**
 * tb_ring_alloc_tx() - Allocate DMA ring for transmit
 * @nhi: Pointer to the NHI the ring is to be allocated for
 * @hop: HopID (ring) to allocate
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 */
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags)
{}
EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);

/**
 * tb_ring_alloc_rx() - Allocate DMA ring for receive
 * @nhi: Pointer to the NHI the ring is to be allocated for
 * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 * @e2e_tx_hop: Transmit HopID when E2E is enabled in @flags
 * @sof_mask: Mask of PDF values that start a frame
 * @eof_mask: Mask of PDF values that end a frame
 * @start_poll: If not %NULL the ring will call this function when an
 *		interrupt is triggered and masked, instead of invoking the
 *		callback for each Rx frame.
 * @poll_data: Optional data passed to @start_poll
 */
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, int e2e_tx_hop,
				 u16 sof_mask, u16 eof_mask,
				 void (*start_poll)(void *), void *poll_data)
{}
EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
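/*
 * Example (illustrative, error handling trimmed): allocating an Rx ring
 * with an automatically chosen HopID where every PDF value both starts
 * and ends a frame:
 *
 *	ring = tb_ring_alloc_rx(nhi, -1, 16, RING_FLAG_NO_SUSPEND, 0,
 *				0xffff, 0xffff, NULL, NULL);
 *	if (!ring)
 *		return -ENOMEM;
 *	tb_ring_start(ring);
 */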

/**
 * tb_ring_start() - enable a ring
 * @ring: Ring to start
 *
 * Must not be invoked in parallel with tb_ring_stop().
 */
void tb_ring_start(struct tb_ring *ring)
{}
EXPORT_SYMBOL_GPL(tb_ring_start);

/**
 * tb_ring_stop() - shutdown a ring
 * @ring: Ring to stop
 *
 * Must not be invoked from a callback.
 *
 * This method will disable the ring. Further calls to
 * tb_ring_tx()/tb_ring_rx() will return -ESHUTDOWN until the ring has been
 * started again with tb_ring_start().
 *
 * All enqueued frames will be canceled and their callbacks will be executed
 * with frame->canceled set to true (on the callback thread). This method
 * returns only after all callback invocations have finished.
 */
void tb_ring_stop(struct tb_ring *ring)
{}
EXPORT_SYMBOL_GPL(tb_ring_stop);

/*
 * tb_ring_free() - free ring
 *
 * When this method returns all invocations of ring->callback will have
 * finished.
 *
 * Ring must be stopped.
 *
 * Must NOT be called from ring_frame->callback!
 */
void tb_ring_free(struct tb_ring *ring)
{}
EXPORT_SYMBOL_GPL(tb_ring_free);
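/*
 * Example teardown order (illustrative): a ring must be stopped before it
 * is freed, and neither call may come from a frame callback. tb_ring_stop()
 * cancels the enqueued frames and runs their callbacks; tb_ring_free()
 * then releases the ring resources:
 *
 *	tb_ring_stop(ring);
 *	tb_ring_free(ring);
 */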

/**
 * nhi_mailbox_cmd() - Send a command through NHI mailbox
 * @nhi: Pointer to the NHI structure
 * @cmd: Command to send
 * @data: Data to be sent with the command
 *
 * Sends mailbox command to the firmware running on NHI. Returns %0 in
 * case of success and negative errno in case of failure.
 */
int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
{}
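/*
 * Sketch of the mailbox handshake (illustrative; assumes the REG_INMAIL_*
 * definitions from nhi_regs.h): write the data word, set the request bit
 * together with the command, then poll until the firmware clears the
 * request bit or NHI_MAILBOX_TIMEOUT expires.
 *
 *	ktime_t timeout;
 *	u32 val;
 *
 *	iowrite32(data, nhi->iobase + REG_INMAIL_DATA);
 *
 *	val = ioread32(nhi->iobase + REG_INMAIL_CMD);
 *	val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
 *	val |= REG_INMAIL_OP_REQUEST | cmd;
 *	iowrite32(val, nhi->iobase + REG_INMAIL_CMD);
 *
 *	timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
 *	do {
 *		val = ioread32(nhi->iobase + REG_INMAIL_CMD);
 *		if (!(val & REG_INMAIL_OP_REQUEST))
 *			break;
 *		usleep_range(10, 20);
 *	} while (ktime_before(ktime_get(), timeout));
 *
 *	if (val & REG_INMAIL_OP_REQUEST)
 *		return -ETIMEDOUT;
 *	if (val & REG_INMAIL_ERROR)
 *		return -EIO;
 *	return 0;
 */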

/**
 * nhi_mailbox_mode() - Return current firmware operation mode
 * @nhi: Pointer to the NHI structure
 *
 * The function reads the current firmware operation mode using the NHI
 * mailbox registers and returns it to the caller.
 */
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
{}
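/*
 * Sketch (illustrative; assumes the REG_OUTMAIL_CMD_OPMODE_* definitions
 * from nhi_regs.h): the mode is a bit field of the outgoing mailbox
 * command register.
 *
 *	u32 val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
 *
 *	val &= REG_OUTMAIL_CMD_OPMODE_MASK;
 *	return (enum nhi_fw_mode)(val >> REG_OUTMAIL_CMD_OPMODE_SHIFT);
 */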

static void nhi_interrupt_work(struct work_struct *work)
{}

static irqreturn_t nhi_msi(int irq, void *data)
{}

static int __nhi_suspend_noirq(struct device *dev, bool wakeup)
{}

static int nhi_suspend_noirq(struct device *dev)
{}

static int nhi_freeze_noirq(struct device *dev)
{}

static int nhi_thaw_noirq(struct device *dev)
{}

static bool nhi_wake_supported(struct pci_dev *pdev)
{}

static int nhi_poweroff_noirq(struct device *dev)
{}

static void nhi_enable_int_throttling(struct tb_nhi *nhi)
{}

static int nhi_resume_noirq(struct device *dev)
{}

static int nhi_suspend(struct device *dev)
{}

static void nhi_complete(struct device *dev)
{}

static int nhi_runtime_suspend(struct device *dev)
{}

static int nhi_runtime_resume(struct device *dev)
{}

static void nhi_shutdown(struct tb_nhi *nhi)
{}

static void nhi_check_quirks(struct tb_nhi *nhi)
{}
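/*
 * Sketch of how the quirk bits above might be applied (illustrative; the
 * full device ID list lives in the real driver):
 *
 *	if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL) {
 *		nhi->quirks |= QUIRK_AUTO_CLEAR_INT;
 *		switch (nhi->pdev->device) {
 *		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
 *		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
 *			nhi->quirks |= QUIRK_E2E;
 *			break;
 *		}
 *	}
 */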

static int nhi_check_iommu_pdev(struct pci_dev *pdev, void *data)
{}

static void nhi_check_iommu(struct tb_nhi *nhi)
{}

static void nhi_reset(struct tb_nhi *nhi)
{}

static int nhi_init_msi(struct tb_nhi *nhi)
{}
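/*
 * Sketch of the vector allocation strategy (illustrative): try MSI-X first
 * with between MSIX_MIN_VECS and MSIX_MAX_VECS vectors so that each ring
 * can own one, and fall back to a single MSI handled by nhi_msi() and
 * nhi_interrupt_work() otherwise.
 *
 *	nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
 *				     PCI_IRQ_MSIX);
 *	if (nvec < 0) {
 *		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
 *		if (nvec < 0)
 *			return nvec;
 *
 *		INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
 *
 *		irq = pci_irq_vector(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 *		res = devm_request_irq(&pdev->dev, irq, nhi_msi,
 *				       IRQF_NO_SUSPEND, "thunderbolt", nhi);
 *		if (res)
 *			return res;
 *	}
 */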

static bool nhi_imr_valid(struct pci_dev *pdev)
{}

static struct tb *nhi_select_cm(struct tb_nhi *nhi)
{}

static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{}

static void nhi_remove(struct pci_dev *pdev)
{}

/*
 * The tunneled PCI bridges are siblings of ours. Use resume_noirq to
 * re-enable the tunnels as soon as possible. A corresponding PCI quirk
 * blocks resume_noirq of the downstream bridges until we are done.
 */
static const struct dev_pm_ops nhi_pm_ops = {
	.suspend_noirq = nhi_suspend_noirq,
	.resume_noirq = nhi_resume_noirq,
	.freeze_noirq = nhi_freeze_noirq,
	.thaw_noirq = nhi_thaw_noirq,
	.restore_noirq = nhi_resume_noirq,
	.suspend = nhi_suspend,
	.poweroff_noirq = nhi_poweroff_noirq,
	.poweroff = nhi_suspend,
	.complete = nhi_complete,
	.runtime_suspend = nhi_runtime_suspend,
	.runtime_resume = nhi_runtime_resume,
};

static struct pci_device_id nhi_ids[] = {
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_DESCRIPTION("Thunderbolt/USB4 core driver");
MODULE_LICENSE("GPL");

static struct pci_driver nhi_driver = {
	.name = "thunderbolt",
	.id_table = nhi_ids,
	.probe = nhi_probe,
	.remove = nhi_remove,
	.shutdown = nhi_remove,
	.driver.pm = &nhi_pm_ops,
};

static int __init nhi_init(void)
{}

static void __exit nhi_unload(void)
{}

rootfs_initcall(nhi_init);
module_exit(nhi_unload);