/* linux/drivers/gpu/drm/v3d/v3d_sched.c */

// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2018 Broadcom */

/**
 * DOC: Broadcom V3D scheduling
 *
 * The shared DRM GPU scheduler is used to coordinate submitting jobs
 * to the hardware.  Each DRM fd (roughly a client process) gets its
 * own scheduler entity, which will process jobs in order.  The GPU
 * scheduler will round-robin between clients to submit the next job.
 *
 * For simplicity, and in order to keep latency low for interactive
 * jobs when bulk background jobs are queued up, we submit a new job
 * to the HW only when it has completed the last one, instead of
 * filling up the CT[01]Q FIFOs with jobs.  Similarly, we use
 * drm_sched_job_add_dependency() to manage the dependency between bin and
 * render, instead of having the clients submit jobs using the HW's
 * semaphores to interlock between them.
 */

#include <linux/sched/clock.h>
#include <linux/kthread.h>

#include <drm/drm_syncobj.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

/* Bit offset of the workgroup count field within the CSD CFG0/1/2
 * registers.  NOTE(review): the value was missing in this snapshot;
 * upstream uses 16 — confirm against v3d_regs.h before relying on it.
 */
#define V3D_CSD_CFG012_WG_COUNT_SHIFT 16

static struct v3d_job *
to_v3d_job(struct drm_sched_job *sched_job)
{}

static struct v3d_bin_job *
to_bin_job(struct drm_sched_job *sched_job)
{}

static struct v3d_render_job *
to_render_job(struct drm_sched_job *sched_job)
{}

static struct v3d_tfu_job *
to_tfu_job(struct drm_sched_job *sched_job)
{}

static struct v3d_csd_job *
to_csd_job(struct drm_sched_job *sched_job)
{}

static struct v3d_cpu_job *
to_cpu_job(struct drm_sched_job *sched_job)
{}

/* drm_sched .free_job callback: releases the job when the scheduler is
 * done with it.  NOTE(review): body elided in this snapshot — upstream
 * drops the job reference via v3d_job_cleanup(); restore from the kernel
 * tree before building.
 */
static void
v3d_sched_job_free(struct drm_sched_job *sched_job)
{}

/* Free per-query resources for the first @count entries of @query_info.
 * Non-static: shared with other parts of the v3d driver.
 * NOTE(review): body elided in this snapshot; restore before building.
 */
void
v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info,
			      unsigned int count)
{}

/* Free per-query resources for the first @count entries of @query_info.
 * Non-static: shared with other parts of the v3d driver.
 * NOTE(review): body elided in this snapshot; restore before building.
 */
void
v3d_performance_query_info_free(struct v3d_performance_query_info *query_info,
				unsigned int count)
{}

/* drm_sched .free_job callback for the CPU queue; presumably also frees
 * the CPU-job-specific query info above — confirm against upstream.
 * NOTE(review): body elided in this snapshot.
 */
static void
v3d_cpu_job_free(struct drm_sched_job *sched_job)
{}

/* Switch the active hardware performance monitor to the one attached to
 * @job, if any — assumption from the name; confirm against upstream.
 * NOTE(review): body elided in this snapshot.
 */
static void
v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
{}

/* Record the start timestamp/stats for @job on @queue.
 * NOTE(review): body elided in this snapshot.
 */
static void
v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
{}

/* Fold the elapsed time up to @now into @stats.
 * NOTE(review): body elided in this snapshot.
 */
static void
v3d_stats_update(struct v3d_stats *stats, u64 now)
{}

/* Update per-queue runtime stats when @job finishes on @queue.
 * Non-static: shared with other parts of the v3d driver.
 * NOTE(review): body elided in this snapshot.
 */
void
v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
{}

/* drm_sched .run_job callback for the binner queue: kicks the bin job on
 * the hardware and returns the fence that signals its completion.
 * NOTE(review): body elided in this snapshot; restore from the kernel
 * tree before building.
 */
static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
{}

/* drm_sched .run_job callback for the render queue.
 * NOTE(review): body elided in this snapshot.
 */
static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
{}

/* drm_sched .run_job callback for the TFU (texture formatting unit) queue.
 * NOTE(review): body elided in this snapshot.
 */
static struct dma_fence *
v3d_tfu_job_run(struct drm_sched_job *sched_job)
{}

/* drm_sched .run_job callback for the CSD (compute shader dispatch) queue.
 * NOTE(review): body elided in this snapshot.
 */
static struct dma_fence *
v3d_csd_job_run(struct drm_sched_job *sched_job)
{}

/* CPU-job handler: patch a CSD job's workgroup counts from an indirect
 * dispatch buffer before submission — assumption from the name; confirm
 * against upstream.  NOTE(review): body elided in this snapshot.
 */
static void
v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
{}

/* CPU-job handler: record GPU timestamp query results.
 * NOTE(review): body elided in this snapshot.
 */
static void
v3d_timestamp_query(struct v3d_cpu_job *job)
{}

/* CPU-job handler: reset a range of timestamp queries.
 * NOTE(review): body elided in this snapshot.
 */
static void
v3d_reset_timestamp_queries(struct v3d_cpu_job *job)
{}

/* Store @value into element @idx of @dst, writing a 64-bit slot when
 * @do_64bit is set and a (truncated) 32-bit slot otherwise.
 * NOTE(review): body was elided in this snapshot; restored from
 * upstream — verify against the kernel tree.
 */
static void
write_to_buffer(void *dst, u32 idx, bool do_64bit, u64 value)
{
	if (do_64bit) {
		u64 *dst64 = (u64 *)dst;

		dst64[idx] = value;
	} else {
		u32 *dst32 = (u32 *)dst;

		dst32[idx] = (u32)value;
	}
}

/* CPU-job handler: copy timestamp query results into a user buffer.
 * NOTE(review): body elided in this snapshot; restore from the kernel
 * tree before building.
 */
static void
v3d_copy_query_results(struct v3d_cpu_job *job)
{}

/* CPU-job handler: reset a range of performance queries.
 * NOTE(review): body elided in this snapshot.
 */
static void
v3d_reset_performance_queries(struct v3d_cpu_job *job)
{}

/* Write the result of performance query @query for @job into @data.
 * NOTE(review): body elided in this snapshot.
 */
static void
v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data, u32 query)
{}

/* CPU-job handler: copy performance query results into a user buffer.
 * NOTE(review): body elided in this snapshot.
 */
static void
v3d_copy_performance_query(struct v3d_cpu_job *job)
{}

static const v3d_cpu_job_fn cpu_job_function[] =;

/* drm_sched .run_job callback for the CPU queue: presumably dispatches
 * to cpu_job_function[] by job type — confirm against upstream.
 * NOTE(review): body elided in this snapshot; restore before building.
 */
static struct dma_fence *
v3d_cpu_job_run(struct drm_sched_job *sched_job)
{}

/* drm_sched .run_job callback for the cache-clean queue.
 * NOTE(review): body elided in this snapshot.
 */
static struct dma_fence *
v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
{}

/* Common timeout path: reset the GPU and tell the scheduler how to
 * proceed with @sched_job.  NOTE(review): body elided in this snapshot.
 */
static enum drm_gpu_sched_stat
v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
{}

/* If the current address or return address have changed, then the GPU
 * has probably made progress and we should delay the reset.  This
 * could fail if the GPU got in an infinite loop in the CL, but that
 * is pretty unlikely outside of an i-g-t testcase.
 */
/* Timeout handler for command-list (bin/render) jobs; per the comment
 * above, compares the saved CTCA/CTRA values to detect forward progress
 * before deciding to reset.  NOTE(review): body elided in this snapshot;
 * restore from the kernel tree before building.
 */
static enum drm_gpu_sched_stat
v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
		    u32 *timedout_ctca, u32 *timedout_ctra)
{}

/* drm_sched .timedout_job callback for the binner queue.
 * NOTE(review): body elided in this snapshot.
 */
static enum drm_gpu_sched_stat
v3d_bin_job_timedout(struct drm_sched_job *sched_job)
{}

/* drm_sched .timedout_job callback for the render queue.
 * NOTE(review): body elided in this snapshot.
 */
static enum drm_gpu_sched_stat
v3d_render_job_timedout(struct drm_sched_job *sched_job)
{}

/* Generic .timedout_job callback for queues with no progress check.
 * NOTE(review): body elided in this snapshot.
 */
static enum drm_gpu_sched_stat
v3d_generic_job_timedout(struct drm_sched_job *sched_job)
{}

/* drm_sched .timedout_job callback for the CSD queue.
 * NOTE(review): body elided in this snapshot.
 */
static enum drm_gpu_sched_stat
v3d_csd_job_timedout(struct drm_sched_job *sched_job)
{}

static const struct drm_sched_backend_ops v3d_bin_sched_ops =;

static const struct drm_sched_backend_ops v3d_render_sched_ops =;

static const struct drm_sched_backend_ops v3d_tfu_sched_ops =;

static const struct drm_sched_backend_ops v3d_csd_sched_ops =;

static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops =;

static const struct drm_sched_backend_ops v3d_cpu_sched_ops =;

/* Create the drm_gpu_scheduler instances for each hardware queue using
 * the ops tables above.  Returns 0 on success or a negative errno.
 * NOTE(review): body elided in this snapshot; restore from the kernel
 * tree before building.
 */
int
v3d_sched_init(struct v3d_dev *v3d)
{}

/* Tear down the schedulers created by v3d_sched_init().
 * NOTE(review): body elided in this snapshot.
 */
void
v3d_sched_fini(struct v3d_dev *v3d)
{}