linux/drivers/firmware/arm_scmi/virtio.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio Transport driver for Arm System Control and Management Interface
 * (SCMI).
 *
 * Copyright (C) 2020-2022 OpenSynergy.
 * Copyright (C) 2021-2022 ARM Ltd.
 */

/**
 * DOC: Theory of Operation
 *
 * The scmi-virtio transport implements a driver for the virtio SCMI device.
 *
 * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx
 * channel (virtio eventq, P2A channel). Each channel is implemented through a
 * virtqueue. Access to each virtqueue is protected by spinlocks.
 */

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

#include <uapi/linux/virtio_ids.h>
#include <uapi/linux/virtio_scmi.h>

#include "common.h"

#define VIRTIO_MAX_RX_TIMEOUT_MS	60000
#define VIRTIO_SCMI_MAX_MSG_SIZE	128 /* Value may be increased. */
#define VIRTIO_SCMI_MAX_PDU_SIZE \
	(VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
/* One out-descriptor for the request plus one in-descriptor for the reply */
#define DESCRIPTORS_PER_TX_MSG		2
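
/*
 * Virtqueue indexes: the cmdq (A2P channel) is vq0 and the optional eventq
 * (P2A channel) is vq1, as described in the Theory of Operation above.
 */
#define VIRTIO_SCMI_VQ_TX	0 /* cmdq */
#define VIRTIO_SCMI_VQ_RX	1 /* eventq */
#define VIRTIO_SCMI_VQ_MAX_CNT	2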

/**
 * struct scmi_vio_channel - Transport channel information
 *
 * @vqueue: Associated virtqueue
 * @cinfo: SCMI Tx or Rx channel
 * @free_lock: Protects access to the @free_list.
 * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only
 * @deferred_tx_work: Worker for TX deferred replies processing
 * @deferred_tx_wq: Workqueue for TX deferred replies
 * @pending_lock: Protects access to the @pending_cmds_list.
 * @pending_cmds_list: List of pre-fetched commands queued for later processing
 * @is_rx: Whether channel is an Rx channel
 * @max_msg: Maximum number of pending messages for this channel.
 * @lock: Protects access to all members except users, free_list and
 *	  pending_cmds_list.
 * @shutdown_done: A reference to a completion used when freeing this channel.
 * @users: A reference count to currently active users of this channel.
 */
struct scmi_vio_channel {
	struct virtqueue *vqueue;
	struct scmi_chan_info *cinfo;
	/* lock to protect access to the free list. */
	spinlock_t free_lock;
	struct list_head free_list;
	struct work_struct deferred_tx_work;
	struct workqueue_struct *deferred_tx_wq;
	/* lock to protect access to the pending list. */
	spinlock_t pending_lock;
	struct list_head pending_cmds_list;
	bool is_rx;
	unsigned int max_msg;
	/*
	 * Lock to protect access to all members except users, free_list and
	 * pending_cmds_list.
	 */
	spinlock_t lock;
	struct completion *shutdown_done;
	refcount_t users;
};

enum poll_states {
	VIO_MSG_NOT_POLLED,
	VIO_MSG_POLLING,
	VIO_MSG_POLL_TIMEOUT,
	VIO_MSG_POLL_DONE,
};

/**
 * struct scmi_vio_msg - Transport PDU information
 *
 * @request: SDU used for commands
 * @input: SDU used for (delayed) responses and notifications
 * @list: List which scmi_vio_msg may be part of
 * @rx_len: Input SDU size in bytes, once input has been received
 * @poll_idx: Last used index registered for polling purposes if this message
 *	      transaction reply was configured for polling.
 * @poll_status: Polling state for this message.
 * @poll_lock: A lock to protect @poll_status
 * @users: A reference count to track this message users and avoid premature
 *	   freeing (and reuse) when polling and IRQ execution paths interleave.
 */
struct scmi_vio_msg {
	struct scmi_msg_payld *request;
	struct scmi_msg_payld *input;
	struct list_head list;
	unsigned int rx_len;
	unsigned int poll_idx;
	enum poll_states poll_status;
	/* Lock to protect access to poll_status. */
	spinlock_t poll_lock;
	refcount_t users;
};

/* Only one SCMI VirtIO device can possibly exist */
static struct virtio_device *scmi_vdev;

static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch,
				   struct scmi_chan_info *cinfo)
{
	unsigned long flags;

	spin_lock_irqsave(&vioch->lock, flags);
	cinfo->transport_info = vioch;
	/* Indirectly marks the channel as no longer available */
	vioch->cinfo = cinfo;
	spin_unlock_irqrestore(&vioch->lock, flags);

	refcount_set(&vioch->users, 1);
}

static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch)
{
	return refcount_inc_not_zero(&vioch->users);
}

static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch)
{
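	/*
	 * A minimal sketch of the release path implied by @users and
	 * @shutdown_done above: the last user completes any waiting
	 * shutdown; reconstructed, so illustrative rather than verbatim.
	 */
	if (!refcount_dec_and_test(&vioch->users))
		return;

	if (vioch->shutdown_done)
		complete(vioch->shutdown_done);
}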

static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
{}

/* Assumed to be called with the vio channel already acquired */
static struct scmi_vio_msg *
scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch)
{
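	/*
	 * A sketch of the free-list handling documented in struct
	 * scmi_vio_channel: pick an unused message under @free_lock and
	 * reset its polling state and refcount.
	 */
	unsigned long flags;
	struct scmi_vio_msg *msg;

	spin_lock_irqsave(&vioch->free_lock, flags);
	if (list_empty(&vioch->free_list)) {
		spin_unlock_irqrestore(&vioch->free_lock, flags);
		return NULL;
	}

	msg = list_first_entry(&vioch->free_list, struct scmi_vio_msg, list);
	list_del_init(&msg->list);
	spin_unlock_irqrestore(&vioch->free_lock, flags);

	/* Still no users, no need to take poll_lock */
	msg->poll_status = VIO_MSG_NOT_POLLED;
	refcount_set(&msg->users, 1);

	return msg;
}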

static inline bool scmi_vio_msg_acquire(struct scmi_vio_msg *msg)
{
	return refcount_inc_not_zero(&msg->users);
}

/* Assumed to be called with the vio channel already acquired */
static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch,
					struct scmi_vio_msg *msg)
{
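	/*
	 * Sketch: drop one reference; the last put returns the message to
	 * the Tx free_list for reuse and reports that to the caller.
	 */
	bool ret;

	ret = refcount_dec_and_test(&msg->users);
	if (ret) {
		unsigned long flags;

		spin_lock_irqsave(&vioch->free_lock, flags);
		list_add_tail(&msg->list, &vioch->free_list);
		spin_unlock_irqrestore(&vioch->free_lock, flags);
	}

	return ret;
}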

static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS);
}

static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
			       struct scmi_vio_msg *msg)
{
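	/*
	 * Sketch: expose the message input SDU to the device as an Rx
	 * buffer; vioch->lock serializes access to the virtqueue.
	 */
	struct scatterlist sg_in;
	int rc;
	unsigned long flags;
	struct device *dev = &vioch->vqueue->vdev->dev;

	sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE);

	spin_lock_irqsave(&vioch->lock, flags);

	rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
	if (rc)
		dev_err(dev, "failed to add to RX virtqueue (%d)\n", rc);
	else
		virtqueue_kick(vioch->vqueue);

	spin_unlock_irqrestore(&vioch->lock, flags);

	return rc;
}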

/*
 * Assumed to be called with the channel already acquired, or not ready at
 * all; vioch->lock MUST NOT already be held.
 */
static void scmi_finalize_message(struct scmi_vio_channel *vioch,
				  struct scmi_vio_msg *msg)
{
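	/*
	 * Sketch: Rx messages are fed back to the device for reuse, while
	 * Tx messages are released to the channel free_list.
	 */
	if (vioch->is_rx)
		scmi_vio_feed_vq_rx(vioch, msg);
	else
		scmi_vio_msg_release(vioch, msg);
}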

static void scmi_vio_complete_cb(struct virtqueue *vqueue)
{}

static void scmi_vio_deferred_tx_worker(struct work_struct *work)
{}

static struct virtqueue_info scmi_vio_vqs_info[] = {
	{ "tx", scmi_vio_complete_cb },
	{ "rx", scmi_vio_complete_cb },
};

static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo)
{
	struct scmi_vio_channel *vioch = base_cinfo->transport_info;

	return vioch->max_msg;
}

static int virtio_link_supplier(struct device *dev)
{}

static bool virtio_chan_available(struct device_node *of_node, int idx)
{}

static void scmi_destroy_tx_workqueue(void *deferred_tx_wq)
{
	destroy_workqueue(deferred_tx_wq);
}

static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
			     bool tx)
{}

static int virtio_chan_free(int id, void *p, void *data)
{}

static int virtio_send_message(struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer)
{
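	/*
	 * A condensed sketch of the Tx path: take a free message, build the
	 * two descriptors accounted for by DESCRIPTORS_PER_TX_MSG (request
	 * out-buffer plus response in-buffer) and kick the cmdq. It assumes
	 * the msg_tx_prepare()/msg_command_size()/msg_response_size()
	 * helpers from the shared SCMI message code; polling-mode setup is
	 * omitted for brevity.
	 */
	struct scmi_vio_channel *vioch = cinfo->transport_info;
	struct scatterlist sg_out, sg_in;
	struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in };
	unsigned long flags;
	struct scmi_vio_msg *msg;
	int rc;

	if (!scmi_vio_channel_acquire(vioch))
		return -EINVAL;

	msg = scmi_virtio_get_free_msg(vioch);
	if (!msg) {
		scmi_vio_channel_release(vioch);
		return -EBUSY;
	}

	msg_tx_prepare(msg->request, xfer);

	sg_init_one(&sg_out, msg->request, msg_command_size(xfer));
	sg_init_one(&sg_in, msg->input, msg_response_size(xfer));

	spin_lock_irqsave(&vioch->lock, flags);

	rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
	if (rc)
		dev_err(vioch->cinfo->dev,
			"failed to add to TX virtqueue (%d)\n", rc);
	else
		virtqueue_kick(vioch->vqueue);

	spin_unlock_irqrestore(&vioch->lock, flags);

	if (rc)
		scmi_vio_msg_release(vioch, msg);

	scmi_vio_channel_release(vioch);

	return rc;
}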

static void virtio_fetch_response(struct scmi_chan_info *cinfo,
				  struct scmi_xfer *xfer)
{
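	/*
	 * Sketch, assuming the shared msg_fetch_response() helper from the
	 * SCMI message packing code: unpack the received SDU into the xfer,
	 * if a reply buffer is still bound to it.
	 */
	struct scmi_vio_msg *msg = xfer->priv;

	if (msg)
		msg_fetch_response(msg->input, msg->rx_len, xfer);
}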

static void virtio_fetch_notification(struct scmi_chan_info *cinfo,
				      size_t max_len, struct scmi_xfer *xfer)
{
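	/*
	 * Sketch, mirroring the response path above but bounded by max_len
	 * via the shared msg_fetch_notification() helper.
	 */
	struct scmi_vio_msg *msg = xfer->priv;

	if (msg)
		msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer);
}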

/**
 * virtio_mark_txdone() - Mark transmission done
 *
 * Free only completed polling transfer messages.
 *
 * Note that in the SCMI VirtIO transport we never explicitly release still
 * outstanding but timed-out messages by forcibly re-adding them to the
 * free-list inside the TX code path; we instead let IRQ/RX callbacks, or the
 * TX deferred worker, eventually clean up such messages when (if ever) a late
 * reply is finally received and discarded.
 *
 * This approach was deemed preferable since those pending timed-out buffers
 * are still effectively owned by the SCMI platform VirtIO device even after
 * timeout expiration: forcibly freeing and reusing them before they have been
 * explicitly returned by the SCMI platform could lead to subtle bugs due to
 * message corruption.
 * An SCMI platform VirtIO device which never returns message buffers is
 * broken anyway and will quickly lead to exhaustion of available messages.
 *
 * For this same reason we take care here to free only those polled messages
 * that were actually replied to (unless they were already processed on the
 * IRQ path - the initial scmi_vio_msg_release() takes care of that case), and
 * also any timed-out polled message that has at least been dequeued from the
 * virtqueues (VIO_MSG_POLL_DONE), since such messages will not be freed
 * anywhere else. Any other polled message is marked as VIO_MSG_POLL_TIMEOUT.
 *
 * Possible late replies to timed-out polled messages will be eventually freed
 * by RX callbacks if delivered on the IRQ path or by the deferred TX worker if
 * dequeued on some other polling path.
 *
 * @cinfo: SCMI channel info
 * @ret: Transmission return code
 * @xfer: Transfer descriptor
 */
static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret,
			       struct scmi_xfer *xfer)
{
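	/*
	 * A sketch of the rules laid out in the comment above;
	 * reconstructed, so illustrative rather than verbatim.
	 */
	unsigned long flags;
	struct scmi_vio_channel *vioch = cinfo->transport_info;
	struct scmi_vio_msg *msg = xfer->priv;

	if (!msg || !scmi_vio_channel_acquire(vioch))
		return;

	/* Ensure msg is unbound from the xfer at this point in any case */
	smp_store_mb(xfer->priv, NULL);

	/* Must be a polled xfer and not already freed on the IRQ path */
	if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) {
		scmi_vio_channel_release(vioch);
		return;
	}

	spin_lock_irqsave(&msg->poll_lock, flags);
	/* Do not free timed-out polled messages that are still in-flight */
	if (ret != -ETIMEDOUT || msg->poll_status == VIO_MSG_POLL_DONE)
		scmi_vio_msg_release(vioch, msg);
	else if (msg->poll_status == VIO_MSG_POLLING)
		msg->poll_status = VIO_MSG_POLL_TIMEOUT;
	spin_unlock_irqrestore(&msg->poll_lock, flags);

	scmi_vio_channel_release(vioch);
}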

/**
 * virtio_poll_done() - Provide polling support for VirtIO transport
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being polled for.
 *
 * VirtIO core provides a polling mechanism based only on last used indexes:
 * this means that it is possible to poll the virtqueues waiting for something
 * new to arrive from the host side, but the only way to check if the freshly
 * arrived buffer was indeed what we were waiting for is to compare the newly
 * arrived message descriptor with the one we are polling on.
 *
 * As a consequence we may dequeue something different from the buffer we
 * were poll-waiting for: if that is the case, such early-fetched buffers are
 * added to the @pending_cmds_list for later processing by a dedicated
 * deferred worker.
 *
 * So, once something new is spotted, we proceed to dequeue all the freshly
 * received used buffers until we find the one we are polling on, or until we
 * have 'seemingly' emptied the virtqueue; if some buffers are still pending
 * in the vqueue at the end of the polling loop (which is possible due to
 * inherent races in the virtqueue handling mechanisms), we similarly kick the
 * deferred worker and let it process those, to avoid looping indefinitely in
 * the .poll_done busy-waiting helper.
 *
 * Finally, we also delegate to the deferred worker the final freeing of any
 * timed-out reply to a polled message that we happen to dequeue.
 *
 * Note that, since we do NOT have a per-message notification-suppression
 * mechanism, the message we are polling for could alternatively be delivered
 * via the usual IRQ callbacks on another core which happens to have IRQs
 * enabled while we are actively polling for it here: in such a case it will
 * be handled as usual by scmi_rx_callback() and the polling loop in the SCMI
 * Core TX path will be transparently terminated anyway.
 *
 * Return: True once polling has successfully completed.
 */
static bool virtio_poll_done(struct scmi_chan_info *cinfo,
			     struct scmi_xfer *xfer)
{
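	/*
	 * A condensed sketch of the polling loop described above,
	 * reconstructed from the comment; a complete implementation would
	 * need extra care around callback re-enabling and poll_idx refresh.
	 */
	bool pending, found = false;
	unsigned int length, any_prefetched = 0;
	unsigned long flags;
	struct scmi_vio_msg *next_msg, *msg = xfer->priv;
	struct scmi_vio_channel *vioch = cinfo->transport_info;

	if (!msg)
		return true;

	/* Processed already by a polling loop on another CPU? */
	if (msg->poll_status == VIO_MSG_POLL_DONE)
		return true;

	if (!scmi_vio_channel_acquire(vioch))
		return true;

	/* Has the cmdq index moved at all? */
	spin_lock_irqsave(&msg->poll_lock, flags);
	pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
	spin_unlock_irqrestore(&msg->poll_lock, flags);
	if (!pending) {
		scmi_vio_channel_release(vioch);
		return false;
	}

	spin_lock_irqsave(&vioch->lock, flags);
	virtqueue_disable_cb(vioch->vqueue);

	/* Dequeue until the polled-for message is found or the vq is empty */
	while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) {
		bool next_msg_done = false;

		/* Mark dequeued polled buffers so they can be freed later */
		spin_lock(&next_msg->poll_lock);
		if (next_msg->poll_status == VIO_MSG_POLLING) {
			next_msg->poll_status = VIO_MSG_POLL_DONE;
			next_msg_done = true;
		}
		spin_unlock(&next_msg->poll_lock);

		next_msg->rx_len = length;
		/* Is this the message we were polling for? */
		if (next_msg == msg) {
			found = true;
			break;
		} else if (next_msg_done) {
			/* Skip the rest if this was another polled msg */
			continue;
		}

		/* Stash early-fetched buffers for the deferred worker */
		spin_lock(&next_msg->poll_lock);
		if (next_msg->poll_status == VIO_MSG_NOT_POLLED ||
		    next_msg->poll_status == VIO_MSG_POLL_TIMEOUT) {
			spin_unlock(&next_msg->poll_lock);

			any_prefetched++;
			spin_lock(&vioch->pending_lock);
			list_add_tail(&next_msg->list,
				      &vioch->pending_cmds_list);
			spin_unlock(&vioch->pending_lock);
		} else {
			spin_unlock(&next_msg->poll_lock);
		}
	}

	/* Kick the worker for any leftovers to avoid busy-looping here */
	if (found) {
		pending = !virtqueue_enable_cb(vioch->vqueue);
	} else {
		msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
		pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
	}

	if (vioch->deferred_tx_wq && (any_prefetched || pending))
		queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work);

	spin_unlock_irqrestore(&vioch->lock, flags);

	scmi_vio_channel_release(vioch);

	return found;
}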

static const struct scmi_transport_ops scmi_virtio_ops = {
	.link_supplier = virtio_link_supplier,
	.chan_available = virtio_chan_available,
	.chan_setup = virtio_chan_setup,
	.chan_free = virtio_chan_free,
	.get_max_msg = virtio_get_max_msg,
	.send_message = virtio_send_message,
	.mark_txdone = virtio_mark_txdone,
	.fetch_response = virtio_fetch_response,
	.fetch_notification = virtio_fetch_notification,
	.poll_done = virtio_poll_done,
};

static int scmi_vio_probe(struct virtio_device *vdev)
{}

static void scmi_vio_remove(struct virtio_device *vdev)
{}

static int scmi_vio_validate(struct virtio_device *vdev)
{}

static unsigned int features[] = {
	VIRTIO_SCMI_F_P2A_CHANNELS,
};

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID },
	{ 0 }
};

static struct virtio_driver virtio_scmi_driver = {
	.driver.name = "scmi-virtio",
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.id_table = id_table,
	.probe = scmi_vio_probe,
	.remove = scmi_vio_remove,
	.validate = scmi_vio_validate,
};

static int __init virtio_scmi_init(void)
{
	return register_virtio_driver(&virtio_scmi_driver);
}

static void virtio_scmi_exit(void)
{
	unregister_virtio_driver(&virtio_scmi_driver);
}

const struct scmi_desc scmi_virtio_desc = {
	.transport_init = virtio_scmi_init,
	.transport_exit = virtio_scmi_exit,
	.ops = &scmi_virtio_ops,
	/* for non-realtime virtio devices */
	.max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS,
	.max_msg = 0, /* overridden by virtio_get_max_msg() */
	.max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,
	.atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE),
};