/* linux/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c */

/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/slab.h>
#include <drm/drm_print.h>

#include "amdgpu_ring_mux.h"
#include "amdgpu_ring.h"
#include "amdgpu.h"

#define AMDGPU_MUX_RESUBMIT_JIFFIES_TIMEOUT
#define AMDGPU_MAX_LAST_UNSIGNALED_THRESHOLD_US

static const struct ring_info {} sw_ring_info[] =;

static struct kmem_cache *amdgpu_mux_chunk_slab;

static inline struct amdgpu_mux_entry *amdgpu_ring_mux_sw_entry(struct amdgpu_ring_mux *mux,
								struct amdgpu_ring *ring)
{}

/* copy packages on sw ring range[begin, end) */
/*
 * Copy the dword range [s_start, s_end) from the software ring onto the
 * real ring, handling the case where the range wraps around the software
 * ring buffer.  (The original body was an empty stub; restored — confirm
 * against upstream.)
 */
static void amdgpu_ring_mux_copy_pkt_from_sw_ring(struct amdgpu_ring_mux *mux,
						  struct amdgpu_ring *ring,
						  u64 s_start, u64 s_end)
{
	u64 start, end;
	struct amdgpu_ring *real_ring = mux->real_ring;

	start = s_start & ring->buf_mask;
	end = s_end & ring->buf_mask;

	if (start == end) {
		DRM_ERROR("no more data copied from sw ring\n");
		return;
	}
	if (start > end) {
		/* the range wraps: copy the tail, then the head */
		amdgpu_ring_alloc(real_ring, (ring->ring_size >> 2) + end - start);
		amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[start],
					   (ring->ring_size >> 2) - start);
		amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[0], end);
	} else {
		amdgpu_ring_alloc(real_ring, end - start);
		amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[start],
					   end - start);
	}
}

static void amdgpu_mux_resubmit_chunks(struct amdgpu_ring_mux *mux)
{}

static void amdgpu_ring_mux_schedule_resubmit(struct amdgpu_ring_mux *mux)
{}

static void amdgpu_mux_resubmit_fallback(struct timer_list *t)
{}

int amdgpu_ring_mux_init(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring,
			 unsigned int entry_size)
{}

void amdgpu_ring_mux_fini(struct amdgpu_ring_mux *mux)
{}

int amdgpu_ring_mux_add_sw_ring(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{}

void amdgpu_ring_mux_set_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, u64 wptr)
{
	/*
	 * NOTE(review): empty stub — the implementation has been stripped
	 * from this copy.  Going by the signature and the helpers above, it
	 * presumably records @wptr as the software ring's new write pointer
	 * and copies the newly written packets onto the real ring under
	 * mux->lock; the exact locking and preemption-interaction order is
	 * not recoverable from this file, so restore it verbatim from the
	 * upstream kernel rather than re-deriving it.
	 */
}

u64 amdgpu_ring_mux_get_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{}

/**
 * amdgpu_ring_mux_get_rptr - get the readptr of the software ring
 * @mux: the multiplexer the software rings attach to
 * @ring: the software ring of which we calculate the readptr
 *
 * The return value of the readptr is not precise while the other rings could
 * write data onto the real ring buffer. After overwriting on the real ring, we
 * can not decide if our packages have been executed or not read yet. However,
 * this function is only called by the tools such as umr to collect the latest
 * packages for the hang analysis. We assume the hang happens near our latest
 * submit. Thus we could use the following logic to give the clue:
 * If the readptr is between start and end, then we return the copy pointer
 * plus the distance from start to readptr. If the readptr is before start, we
 * return the copy pointer. Lastly, if the readptr is past end, we return the
 * write pointer.
 */
u64 amdgpu_ring_mux_get_rptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{}

u64 amdgpu_sw_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{}

u64 amdgpu_sw_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{}

void amdgpu_sw_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{}

/* Override insert_nop to prevent emitting nops to the software rings */
void amdgpu_sw_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	/*
	 * Intentionally a no-op apart from the sanity check: per the comment
	 * above, software rings must never have nops emitted into them —
	 * padding happens on the real ring.  @count is deliberately ignored.
	 */
	WARN_ON(!ring->is_sw_ring);
}

const char *amdgpu_sw_ring_name(int idx)
{}

unsigned int amdgpu_sw_ring_priority(int idx)
{}

/* Scan the low-priority rings for unsignaled fences while the high-priority ring has none. */
static int amdgpu_mcbp_scan(struct amdgpu_ring_mux *mux)
{}

/* Trigger Mid-Command Buffer Preemption (MCBP) and find if we need to resubmit. */
static int amdgpu_mcbp_trigger_preempt(struct amdgpu_ring_mux *mux)
{}

void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring)
{}

void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring)
{}

void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mux_offset_type type)
{
	/*
	 * NOTE(review): empty stub — the implementation has been stripped
	 * from this copy.  Judging from the signature it presumably computes
	 * the current offset on the software ring and forwards it, with
	 * @type, to amdgpu_ring_mux_ib_mark_offset(); confirm against the
	 * upstream kernel and restore before building.
	 */
}

void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{}

static void scan_and_remove_signaled_chunk(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{}

void amdgpu_ring_mux_ib_mark_offset(struct amdgpu_ring_mux *mux,
				    struct amdgpu_ring *ring, u64 offset,
				    enum amdgpu_ring_mux_offset_type type)
{
	/*
	 * NOTE(review): empty stub — the implementation has been stripped
	 * from this copy.  It presumably stores @offset (tagged by @type) in
	 * the current chunk of @ring's mux entry for later resubmission;
	 * confirm against the upstream kernel and restore before building.
	 */
}

void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{}

bool amdgpu_mcbp_handle_trailing_fence_irq(struct amdgpu_ring_mux *mux)
{
	/*
	 * NOTE(review): empty stub — the implementation has been stripped
	 * from this copy.  As written this is a non-void (bool) function with
	 * no return statement: using its return value is undefined behavior.
	 * It presumably handles the trailing-fence interrupt raised by a
	 * mid-command-buffer preemption and decides whether chunks must be
	 * resubmitted; restore it verbatim from the upstream kernel.
	 */
}