// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include "rxe.h"

static struct workqueue_struct *rxe_wq;

int rxe_alloc_wq(void)
{
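	/* sketch: WQ_UNBOUND lets work items run on any cpu */
	rxe_wq = alloc_workqueue("rxe_wq", WQ_UNBOUND, WQ_MAX_ACTIVE);
	if (!rxe_wq)
		return -ENOMEM;

	return 0;
}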

void rxe_destroy_wq(void)
{
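	destroy_workqueue(rxe_wq);
}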

/* Check if the task is idle, i.e. not running, not scheduled in
 * the work queue and not draining. If so, reserve a slot in do_task()
 * by moving the state to busy and taking a qp reference to cover
 * the gap from now until the task finishes. The state moves out of
 * busy when the task returns a nonzero value from do_task(). If the
 * state is already busy it is raised to armed to indicate to do_task()
 * that an additional pass should be made over the task.
 * Context: caller should hold task->lock.
 * Returns: true if the state transitioned from idle to busy, else false.
 */
static bool __reserve_if_idle(struct rxe_task *task)
{
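	/* sketch, per the comment above: idle -> busy plus a qp
	 * reference; busy -> armed; anything else is left alone
	 */
	WARN_ON(rxe_read(task->qp) <= 0);

	if (task->state == TASK_STATE_IDLE) {
		rxe_get(task->qp);
		task->state = TASK_STATE_BUSY;
		task->num_sched++;
		return true;
	}

	if (task->state == TASK_STATE_BUSY)
		task->state = TASK_STATE_ARMED;

	return false;
}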

/* Check if the task is idle or drained and not currently
 * scheduled in the work queue. This routine is called by
 * rxe_cleanup_task or rxe_disable_task to see if the queue
 * is empty.
 * Context: caller should hold task->lock.
 * Returns: true if done, else false.
 */
static bool __is_done(struct rxe_task *task)
{
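	if (work_pending(&task->work))
		return false;

	return task->state == TASK_STATE_IDLE ||
	       task->state == TASK_STATE_DRAINED;
}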

/* a locked version of __is_done */
static bool is_done(struct rxe_task *task)
{
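	unsigned long flags;
	bool done;

	spin_lock_irqsave(&task->lock, flags);
	done = __is_done(task);
	spin_unlock_irqrestore(&task->lock, flags);

	return done;
}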

/* do_task is a wrapper for the three tasks (requester,
 * completer, responder) and calls them in a loop until
 * they return a nonzero value. It is called either
 * directly by rxe_run_task or indirectly if rxe_sched_task
 * schedules the task. Callers must use __reserve_if_idle to
 * move the task to busy before calling or scheduling it.
 * The task can also be moved to drained or invalid
 * by calls to rxe_cleanup_task or rxe_disable_task.
 * In that case tasks which get here are not executed but
 * just flushed. The task functions are designed to check
 * whether there is work to do, perform part of it, and then
 * return zero; once all the work has been consumed they
 * return a nonzero value.
 * The number of times a task can be run per call is limited
 * by a maximum iteration count so that one task cannot hold
 * the cpu forever. If the limit is hit and work remains, the
 * task is rescheduled.
 */
static void do_task(struct rxe_task *task)
{
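	/* sketch of the loop described above; RXE_MAX_ITERATIONS is an
	 * assumed per-pass limit
	 */
	unsigned int iterations;
	unsigned long flags;
	int resched = 0;
	int cont;
	int ret;

	WARN_ON(rxe_read(task->qp) <= 0);

	/* a task moved to drained or invalid is flushed, not run */
	spin_lock_irqsave(&task->lock, flags);
	if (task->state >= TASK_STATE_DRAINED) {
		rxe_put(task->qp);
		task->num_done++;
		spin_unlock_irqrestore(&task->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&task->lock, flags);

	do {
		iterations = RXE_MAX_ITERATIONS;
		cont = 0;

		do {
			ret = task->func(task->qp);
		} while (ret == 0 && iterations-- > 0);

		spin_lock_irqsave(&task->lock, flags);
		if (!ret) {
			/* iteration limit hit with work remaining;
			 * yield the cpu and reschedule the task
			 */
			task->state = TASK_STATE_IDLE;
			resched = 1;
			goto exit;
		}

		switch (task->state) {
		case TASK_STATE_BUSY:
			task->state = TASK_STATE_IDLE;
			break;

		case TASK_STATE_ARMED:
			/* someone tried to schedule the task while it
			 * was running; make another pass
			 */
			task->state = TASK_STATE_BUSY;
			cont = 1;
			break;

		case TASK_STATE_DRAINING:
			task->state = TASK_STATE_DRAINED;
			break;

		default:
			WARN_ON(1);
			task->state = TASK_STATE_IDLE;
		}

exit:
		if (!cont)
			task->num_done++;
		spin_unlock_irqrestore(&task->lock, flags);
	} while (cont);

	task->ret = ret;

	if (resched)
		rxe_sched_task(task);

	rxe_put(task->qp);
}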

/* wrapper around do_task to fix argument for work queue */
static void do_work(struct work_struct *work)
{
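	do_task(container_of(work, struct rxe_task, work));
}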

int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
		  int (*func)(struct rxe_qp *))
{
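	WARN_ON(rxe_read(qp) <= 0);

	task->qp = qp;
	task->func = func;
	task->state = TASK_STATE_IDLE;
	spin_lock_init(&task->lock);
	INIT_WORK(&task->work, do_work);

	return 0;
}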

/* rxe_cleanup_task is only called from rxe_do_qp_cleanup in
 * process context. The qp is already completed with no
 * remaining references. Once the queue is drained the
 * task is moved to invalid and rxe_cleanup_task returns.
 * The qp cleanup code then calls the task functions directly,
 * without using the task struct, to drain any late arriving
 * packets or work requests.
 */
void rxe_cleanup_task(struct rxe_task *task)
{
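	/* sketch: mark the task draining, wait for scheduled work to
	 * finish, then mark it invalid
	 */
	unsigned long flags;

	spin_lock_irqsave(&task->lock, flags);
	if (!__is_done(task) && task->state < TASK_STATE_DRAINED) {
		task->state = TASK_STATE_DRAINING;
	} else {
		task->state = TASK_STATE_INVALID;
		spin_unlock_irqrestore(&task->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&task->lock, flags);

	/* the task can no longer be scheduled or run; just wait
	 * for the already scheduled work to finish
	 */
	while (!is_done(task))
		cond_resched();

	spin_lock_irqsave(&task->lock, flags);
	task->state = TASK_STATE_INVALID;
	spin_unlock_irqrestore(&task->lock, flags);
}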

/* Run the task inline if it is currently idle.
 * do_task() must not be called while holding the lock.
 */
void rxe_run_task(struct rxe_task *task)
{
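	unsigned long flags;
	bool run;

	WARN_ON(rxe_read(task->qp) <= 0);

	/* drop the lock before running the task inline */
	spin_lock_irqsave(&task->lock, flags);
	run = __reserve_if_idle(task);
	spin_unlock_irqrestore(&task->lock, flags);

	if (run)
		do_task(task);
}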

/* Schedule the task to run later as a work queue entry.
 * Unlike do_task(), queue_work() can be called while
 * holding the lock.
 */
void rxe_sched_task(struct rxe_task *task)
{
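	unsigned long flags;

	WARN_ON(rxe_read(task->qp) <= 0);

	spin_lock_irqsave(&task->lock, flags);
	if (__reserve_if_idle(task))
		queue_work(rxe_wq, &task->work);
	spin_unlock_irqrestore(&task->lock, flags);
}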

/* rxe_disable_task and rxe_enable_task are only called from
 * rxe_modify_qp in process context. The task is moved
 * to the drained state by do_task.
 */
void rxe_disable_task(struct rxe_task *task)
{
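	/* sketch: same draining dance as rxe_cleanup_task, but the
	 * final state is drained instead of invalid
	 */
	unsigned long flags;

	WARN_ON(rxe_read(task->qp) <= 0);

	spin_lock_irqsave(&task->lock, flags);
	if (!__is_done(task) && task->state < TASK_STATE_DRAINED) {
		task->state = TASK_STATE_DRAINING;
	} else {
		task->state = TASK_STATE_DRAINED;
		spin_unlock_irqrestore(&task->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&task->lock, flags);

	while (!is_done(task))
		cond_resched();

	spin_lock_irqsave(&task->lock, flags);
	task->state = TASK_STATE_DRAINED;
	spin_unlock_irqrestore(&task->lock, flags);
}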

void rxe_enable_task(struct rxe_task *task)
{
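	unsigned long flags;

	WARN_ON(rxe_read(task->qp) <= 0);

	spin_lock_irqsave(&task->lock, flags);
	if (task->state == TASK_STATE_INVALID) {
		spin_unlock_irqrestore(&task->lock, flags);
		return;
	}

	task->state = TASK_STATE_IDLE;
	spin_unlock_irqrestore(&task->lock, flags);
}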