/* linux/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h */

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_RING_H__
#define __AMDGPU_RING_H__

#include <drm/amdgpu_drm.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_print.h>
#include <drm/drm_suballoc.h>

struct amdgpu_device;
struct amdgpu_ring;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_vm;

/* max number of rings */
/*
 * NOTE(review): the replacement values were missing from this copy of the
 * header; restored from the upstream amdgpu driver — verify against the
 * exact kernel tree version before relying on them.
 */
#define AMDGPU_MAX_RINGS		124
#define AMDGPU_MAX_HWIP_RINGS		64
#define AMDGPU_MAX_GFX_RINGS		2
#define AMDGPU_MAX_SW_GFX_RINGS		2
#define AMDGPU_MAX_COMPUTE_RINGS	8
#define AMDGPU_MAX_VCE_RINGS		3
#define AMDGPU_MAX_UVD_ENC_RINGS	2
#define AMDGPU_MAX_VPE_RINGS		2

/*
 * Software ring priority levels; PRIO_DEFAULT aliases PRIO_1.
 * NOTE(review): enumerators were missing from this copy; restored from the
 * upstream amdgpu driver — verify against the exact kernel tree version.
 */
enum amdgpu_ring_priority_level {
	AMDGPU_RING_PRIO_0,
	AMDGPU_RING_PRIO_1,
	AMDGPU_RING_PRIO_DEFAULT = 1,
	AMDGPU_RING_PRIO_2,
	AMDGPU_RING_PRIO_MAX
};

/* some special values for the owner field */
/*
 * NOTE(review): replacement text was missing from this copy; restored from
 * the upstream amdgpu driver — verify against the exact kernel tree version.
 * The owner values are sentinel pointers that can never collide with a real
 * allocation.
 */
#define AMDGPU_FENCE_OWNER_UNDEFINED	((void *)0ul)
#define AMDGPU_FENCE_OWNER_VM		((void *)1ul)
#define AMDGPU_FENCE_OWNER_KFD		((void *)2ul)

/* flags passed when emitting a fence */
#define AMDGPU_FENCE_FLAG_64BIT		(1 << 0)
#define AMDGPU_FENCE_FLAG_INT		(1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY	(1 << 2)
#define AMDGPU_FENCE_FLAG_EXEC		(1 << 3)

/* recover the amdgpu_ring embedding a drm_gpu_scheduler */
#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)

/* size of the suballocator-backed IB pool, in bytes */
#define AMDGPU_IB_POOL_SIZE	(1024 * 1024)

/*
 * NOTE(review): enumerator list appears to have been stripped from this copy.
 * Upstream ties the ring types to the AMDGPU_HW_IP_* UAPI values from
 * amdgpu_drm.h (GFX, COMPUTE, SDMA, UVD, VCE, VCN, ...) plus kernel-internal
 * types such as KIQ and MES — restore from the matching kernel tree; do not
 * invent values here, as they must stay in sync with the UAPI.
 */
enum amdgpu_ring_type {};

/*
 * Which IB suballocation pool a buffer comes from.
 * NOTE(review): enumerators were missing from this copy; restored from the
 * upstream amdgpu driver — verify against the exact kernel tree version.
 */
enum amdgpu_ib_pool_type {
	/* Normal submissions to the top of the pipeline */
	AMDGPU_IB_POOL_DELAYED,
	/* Immediate submissions to the bottom of the pipeline */
	AMDGPU_IB_POOL_IMMEDIATE,
	/* Direct submission to the ring buffer during init and reset */
	AMDGPU_IB_POOL_DIRECT,

	AMDGPU_IB_POOL_MAX
};

/*
 * NOTE(review): struct body appears to have been stripped from this copy.
 * An IB (indirect buffer) normally records the GPU address/length of a
 * command buffer plus submission flags — restore the fields from the
 * matching kernel tree; amdgpu_ib_get()/amdgpu_ib_free() below operate on it.
 */
struct amdgpu_ib {};

/*
 * NOTE(review): struct body appears to have been stripped from this copy —
 * restore from the matching kernel tree (per-HW-IP scheduler bookkeeping,
 * used together with amdgpu_sched_ops below).
 */
struct amdgpu_sched {};

/*
 * Fences.
 */
/*
 * NOTE(review): struct body appears to have been stripped from this copy.
 * This is the per-ring fence bookkeeping state used by the amdgpu_fence_*
 * functions declared below — restore fields from the matching kernel tree.
 */
struct amdgpu_fence_driver {};

/* drm_sched backend callbacks implemented by amdgpu */
extern const struct drm_sched_backend_ops amdgpu_sched_ops;

/* Per-ring fence error/teardown handling */
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);

/* Fence driver lifecycle: per-ring init/start, device-wide hw/sw init+fini */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
/* Emit a fence on @ring; the *_polling variant is for use without IRQs */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job,
		      unsigned flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
			      uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);

void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop);

/* Fence timing helpers (timestamps are ktime-based, reported in us) */
u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring);
void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq,
					 ktime_t timestamp);

/*
 * Rings.
 */

/* provided by hw blocks that expose a ring buffer for commands */
/*
 * NOTE(review): ops-table body appears to have been stripped from this copy.
 * It normally holds the per-IP callbacks that the amdgpu_ring_* dispatch
 * macros below invoke — restore from the matching kernel tree.
 */
struct amdgpu_ring_funcs {};

/*
 * NOTE(review): struct body appears to have been stripped from this copy.
 * This is the central per-ring state (ring buffer, wptr/rptr, fence driver,
 * drm_gpu_scheduler, ...) — restore from the matching kernel tree.
 */
struct amdgpu_ring {};

/*
 * Dispatch helpers into the ring's funcs table.
 * NOTE(review): the macro expansions were missing from this copy (every call
 * site would silently expand to nothing); restored from the upstream amdgpu
 * driver — verify against the exact kernel tree version.
 */
#define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
#define amdgpu_ring_patch_cs_in_place(r, p, job, ib) ((r)->funcs->patch_cs_in_place((p), (job), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) ((r)->funcs->test_ib((r), (t)))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, job, ib, flags) ((r)->funcs->emit_ib((r), (job), (ib), (flags)))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_emit_gfx_shadow(r, s, c, g, i, v) ((r)->funcs->emit_gfx_shadow((r), (s), (c), (g), (i), (v)))
#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
#define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r, a) (r)->funcs->init_cond_exec((r), (a))
#define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r)
#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o)))

/* Ring buffer management: allocate dwords, begin/end an IB submission */
unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_ib_begin(struct amdgpu_ring *ring);
void amdgpu_ring_ib_end(struct amdgpu_ring *ring);
/* Hooks invoked while emitting the CNTL/CE/DE sections of an IB */
void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring);
void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring);
void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring);

/* Padding, commit/undo, and ring lifecycle */
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio,
		     atomic_t *sched_score);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t val0,
						uint32_t reg1, uint32_t val1);
/* Try to recover a hung ring by killing the guilty fence context */
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence);

/*
 * NOTE(review): body appears to have been stripped — currently a no-op.
 * Upstream this writes @cond_exec to the ring's cond_exe CPU address to
 * arm/disarm preemption; restore from the matching kernel tree.
 */
static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
							bool cond_exec)
{}

/*
 * NOTE(review): body appears to have been stripped — currently a no-op.
 * Upstream this fills the ring buffer with the ring's nop packet; restore
 * from the matching kernel tree.
 */
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{}

/*
 * NOTE(review): body appears to have been stripped — currently a no-op.
 * Upstream this writes the dword @v at the ring's current write position
 * and advances wptr; restore from the matching kernel tree.
 */
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{}

/*
 * NOTE(review): body appears to have been stripped — currently a no-op.
 * Upstream this copies @count_dw dwords from @src into the ring buffer,
 * handling wrap-around; restore from the matching kernel tree.
 */
static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
					      void *src, int count_dw)
{}

/**
 * amdgpu_ring_patch_cond_exec - patch dw count of conditional execute
 * @ring: amdgpu_ring structure
 * @offset: offset returned by amdgpu_ring_init_cond_exec
 *
 * Calculate the dw count and patch it into a cond_exec command.
 */
/* NOTE(review): body appears to have been stripped — currently a no-op;
 * restore from the matching kernel tree. */
static inline void amdgpu_ring_patch_cond_exec(struct amdgpu_ring *ring,
					       unsigned int offset)
{}

/*
 * Translate an offset within the MES context metadata into a GPU/CPU
 * address, or 0/NULL when the ring is not a MES queue.
 * NOTE(review): expansions were missing from this copy; restored from the
 * upstream amdgpu driver — verify against the exact kernel tree version.
 */
#define amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset) \
	(ring->is_mes_queue && ring->mes_ctx ? \
	 (ring->mes_ctx->meta_data_gpu_addr + offset) : 0)

#define amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset) \
	(ring->is_mes_queue && ring->mes_ctx ? \
	 (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : NULL)

/* Run the ring's test_ring callback and report the result */
int amdgpu_ring_test_helper(struct amdgpu_ring *ring);

/* Register the per-ring debugfs entries */
void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring);

/* Initialize the ring's memory queue descriptor (MQD) */
int amdgpu_ring_init_mqd(struct amdgpu_ring *ring);

/*
 * NOTE(review): body appears to have been stripped — upstream this reads
 * the dword at index @idx from the IB's mapped buffer; currently returns
 * nothing (missing return is UB if the result is used). Restore from the
 * matching kernel tree.
 */
static inline u32 amdgpu_ib_get_value(struct amdgpu_ib *ib, int idx)
{}

/*
 * NOTE(review): body appears to have been stripped — currently a no-op.
 * Upstream this stores @value at dword index @idx of the IB's mapped
 * buffer; restore from the matching kernel tree.
 */
static inline void amdgpu_ib_set_value(struct amdgpu_ib *ib, int idx,
				       uint32_t value)
{}

/* IB allocation/free from the per-device suballocated pools */
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size,
		  enum amdgpu_ib_pool_type pool,
		  struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f);
/* Submit @num_ibs IBs on @ring; optional fence returned through @f */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f);
/* Device-wide IB pool lifecycle and smoke tests */
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
#endif