// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright(c) 2020 - 2023 Cornelis Networks, Inc.
 * Copyright(c) 2015 - 2018 Intel Corporation.
 */

#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mmu_context.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

#include "hfi.h"
#include "sdma.h"
#include "user_sdma.h"
#include "verbs.h"  /* for the headers */
#include "common.h" /* for struct hfi1_tid_info */
#include "trace.h"

static uint hfi1_sdma_comp_ring_size = 128;
module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128");
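
/*
 * Usage note (illustrative, not from the original file): sdma_comp_size
 * is a load-time knob, e.g. "modprobe hfi1 sdma_comp_size=256", and
 * S_IRUGO exposes it read-only under /sys/module/hfi1/parameters/.
 */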

static unsigned initial_pkt_count = 8;

static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
static void user_sdma_free_request(struct user_sdma_request *req);
static int check_header_template(struct user_sdma_request *req,
				 struct hfi1_pkt_header *hdr, u32 lrhlen,
				 u32 datalen);
static int set_txreq_header(struct user_sdma_request *req,
			    struct user_sdma_txreq *tx, u32 datalen);
static int set_txreq_header_ahg(struct user_sdma_request *req,
				struct user_sdma_txreq *tx, u32 len);
static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
				  struct hfi1_user_sdma_comp_q *cq,
				  u16 idx, enum hfi1_sdma_comp_state state,
				  int ret);
static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags);
static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);

static int defer_packet_queue(
	struct sdma_engine *sde,
	struct iowait_work *wait,
	struct sdma_txreq *txreq,
	uint seq,
	bool pkts_sent);
static void activate_packet_queue(struct iowait *wait, int reason);

static int defer_packet_queue(
	struct sdma_engine *sde,
	struct iowait_work *wait,
	struct sdma_txreq *txreq,
	uint seq,
	bool pkts_sent)
{}
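
/*
 * Hedged sketch of the defer path (the upstream body is elided above).
 * Assumed behavior, based on the iowait/sdma APIs used elsewhere in
 * this driver: if the engine has made progress since @seq, ask the
 * caller to retry; otherwise mark the queue deferred and park it on
 * the engine's dmawait list. The _sketch name is illustrative.
 */
static int __maybe_unused defer_packet_queue_sketch(struct sdma_engine *sde,
						    struct iowait_work *wait,
						    struct sdma_txreq *txreq,
						    uint seq, bool pkts_sent)
{
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);

	write_seqlock(&sde->waitlock);
	if (sdma_progress(sde, seq, txreq)) {
		/* Descriptors drained in the meantime; retry the send. */
		write_sequnlock(&sde->waitlock);
		return -EAGAIN;
	}
	xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
	if (list_empty(&pq->busy.list)) {
		pq->busy.lock = &sde->waitlock;
		iowait_get_priority(&pq->busy);
		iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
	}
	write_sequnlock(&sde->waitlock);
	return -EBUSY;
}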

static void activate_packet_queue(struct iowait *wait, int reason)
{
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);

	trace_hfi1_usdma_activate(pq, wait, reason);
	xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
	wake_up(&wait->wait_dma);
}

int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
				struct hfi1_filedata *fd)
{}
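
/*
 * Hedged sketch of the completion-ring allocation this function is
 * expected to perform (illustrative helper, not the elided upstream
 * body): one entry per ring slot, sized by the sdma_comp_size module
 * parameter and allocated with vmalloc_user() so the process can
 * mmap() it later.
 */
static int __maybe_unused alloc_comp_ring_sketch(struct hfi1_user_sdma_comp_q *cq)
{
	cq->comps = vmalloc_user(PAGE_ALIGN(sizeof(*cq->comps) *
					    hfi1_sdma_comp_ring_size));
	if (!cq->comps)
		return -ENOMEM;
	cq->nentries = hfi1_sdma_comp_ring_size;
	return 0;
}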

static void flush_pq_iowait(struct hfi1_user_sdma_pkt_q *pq)
{}

int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
			       struct hfi1_ctxtdata *uctxt)
{}
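
/*
 * Hedged sketch of the drain step teardown is expected to perform
 * (illustrative helper name): wait until every outstanding request has
 * been freed, periodically flushing the iowait in case the queue is
 * still parked on an engine's wait list.
 */
static void __maybe_unused drain_pkt_q_sketch(struct hfi1_user_sdma_pkt_q *pq)
{
	iowait_sdma_drain(&pq->busy);
	/* n_reqs drops to zero in pq_update() as requests complete. */
	while (!wait_event_interruptible_timeout(pq->wait,
						 !atomic_read(&pq->n_reqs),
						 msecs_to_jiffies(1000)))
		flush_pq_iowait(pq);
}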

static u8 dlid_to_selector(u16 dlid)
{}
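
/*
 * Hedged sketch: the selector spreads requests with different DLIDs
 * across SDMA engines. Upstream memoizes its assignments per hashed
 * DLID; this illustrative version shows only the hash step.
 */
static u8 __maybe_unused dlid_to_selector_sketch(u16 dlid)
{
	return (u8)((dlid ^ (dlid >> 8)) & 0x7f);
}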

/**
 * hfi1_user_sdma_process_request() - Process and start a user sdma request
 * @fd: valid file descriptor
 * @iovec: array of io vectors to process
 * @dim: overall iovec array size
 * @count: number of io vector array entries processed
 */
int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
				   struct iovec *iovec, unsigned long dim,
				   unsigned long *count)
{}
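
/*
 * Hedged sketch of the first validation steps this entry point is
 * expected to take (illustrative, not the elided upstream body): the
 * leading iovec must carry the request info plus a packet header, and
 * the completion slot named by userspace must fall inside the ring.
 */
static int __maybe_unused validate_req_info_sketch(struct iovec *iovec)
{
	struct sdma_req_info info;

	if (iovec[0].iov_len < sizeof(info) + sizeof(struct hfi1_pkt_header))
		return -EINVAL;
	if (copy_from_user(&info, iovec[0].iov_base, sizeof(info)))
		return -EFAULT;
	/* The slot index must address a valid completion-ring entry. */
	if (info.comp_idx >= hfi1_sdma_comp_ring_size)
		return -EINVAL;
	return 0;
}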

static inline u32 compute_data_length(struct user_sdma_request *req,
				      struct user_sdma_txreq *tx)
{}

static inline u32 pad_len(u32 len)
{}
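
/*
 * Hedged sketch: SDMA payloads are padded to a dword (4-byte)
 * boundary; ALIGN() is an equivalent, idiomatic spelling.
 */
static inline u32 __maybe_unused pad_len_sketch(u32 len)
{
	return ALIGN(len, sizeof(u32));
}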

static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
{}
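
/*
 * Hedged sketch: the length behind the LRH is the header minus the
 * PBC (which never goes on the wire), plus the 4-byte ICRC, plus the
 * payload.
 */
static inline u32 __maybe_unused get_lrh_len_sketch(struct hfi1_pkt_header hdr,
						    u32 len)
{
	/* (complete header - PBC) + ICRC + data */
	return (sizeof(hdr) - sizeof(hdr.pbc)) + 4 + len;
}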

static int user_sdma_txadd_ahg(struct user_sdma_request *req,
			       struct user_sdma_txreq *tx,
			       u32 datalen)
{}

static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
{}

static int check_header_template(struct user_sdma_request *req,
				 struct hfi1_pkt_header *hdr, u32 lrhlen,
				 u32 datalen)
{}

/*
 * Correctly set the BTH.PSN field based on type of
 * transfer - eager packets can just increment the PSN but
 * expected packets encode generation and sequence in the
 * BTH.PSN field so just incrementing will result in errors.
 */
static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags)
{}
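
/*
 * Hedged sketch matching the comment above (upstream body elided):
 * eager PSNs advance linearly, while expected PSNs advance only the
 * KDETH sequence bits so the generation bits stay intact. The mask
 * width depends on whether extended PSNs are enabled.
 */
static inline u32 __maybe_unused set_pkt_bth_psn_sketch(__be32 bthpsn,
							u8 expct, u32 frags)
{
	u32 mask = HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff;
	u32 psn = be32_to_cpu(bthpsn) & mask;

	if (expct)
		/* Keep generation bits, bump only the sequence bits. */
		psn = (psn & ~HFI1_KDETH_BTH_SEQ_MASK) |
		      ((psn + frags) & HFI1_KDETH_BTH_SEQ_MASK);
	else
		psn += frags;
	return psn & mask;
}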

static int set_txreq_header(struct user_sdma_request *req,
			    struct user_sdma_txreq *tx, u32 datalen)
{}

static int set_txreq_header_ahg(struct user_sdma_request *req,
				struct user_sdma_txreq *tx, u32 datalen)
{}

/**
 * user_sdma_txreq_cb() - SDMA tx request completion callback.
 * @txreq: valid sdma tx request
 * @status: success/failure of request
 *
 * Called when the SDMA progress state machine gets notification that
 * the SDMA descriptors for this tx request have been processed by the
 * DMA engine. Called in interrupt context.
 * Only do work on completed sequences.
 */
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
{}
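
/*
 * Hedged sketch of the callback's expected flow (upstream body elided
 * above): note a descriptor failure on the request, free the tx, and
 * only once the final packet has completed report the request's state
 * and drop its queue reference.
 */
static void __maybe_unused user_sdma_txreq_cb_sketch(struct sdma_txreq *txreq,
						     int status)
{
	struct user_sdma_txreq *tx =
		container_of(txreq, struct user_sdma_txreq, txreq);
	struct user_sdma_request *req = tx->req;
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct hfi1_user_sdma_comp_q *cq = req->cq;
	enum hfi1_sdma_comp_state state = COMPLETE;

	if (status != SDMA_TXREQ_S_OK) {
		WRITE_ONCE(req->has_error, 1);
		state = ERROR;
	}

	req->seqcomp = tx->seqnum;
	kmem_cache_free(pq->txreq_cache, tx);

	/* Not the last packet of the request? Nothing more to do. */
	if (req->seqcomp != req->info.npkts - 1)
		return;

	user_sdma_free_request(req);
	set_comp_state(pq, cq, req->info.comp_idx, state, status);
	pq_update(pq);
}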

static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
{}
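
/*
 * Hedged sketch: each in-flight request holds a reference on the
 * queue; when the count drains to zero, wake anyone blocked in queue
 * teardown.
 */
static inline void __maybe_unused pq_update_sketch(struct hfi1_user_sdma_pkt_q *pq)
{
	if (atomic_dec_and_test(&pq->n_reqs))
		wake_up(&pq->wait);
}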

static void user_sdma_free_request(struct user_sdma_request *req)
{}

static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
				  struct hfi1_user_sdma_comp_q *cq,
				  u16 idx, enum hfi1_sdma_comp_state state,
				  int ret)
{}
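
/*
 * Hedged sketch: publish the request's final state in its completion
 * ring slot. The error code must be visible before the state flips,
 * since userspace polls the status word.
 */
static inline void __maybe_unused set_comp_state_sketch(struct hfi1_user_sdma_comp_q *cq,
							u16 idx,
							enum hfi1_sdma_comp_state state,
							int ret)
{
	if (state == ERROR)
		cq->comps[idx].errcode = -ret;
	smp_wmb(); /* order errcode before status */
	cq->comps[idx].status = state;
}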