/* linux/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h */

#ifndef __NVKM_RUNL_H__
#define __NVKM_RUNL_H__
#include <core/intr.h>
struct nvkm_cctx;
struct nvkm_cgrp;
struct nvkm_chan;
struct nvkm_memory;
struct nvkm_object;
struct nvkm_vctx;
enum nvkm_subdev_type;

struct nvkm_engn {};

#define ENGN_PRINT(e,l,p,f,a...)
#define ENGN_DEBUG(e,f,a...)

struct nvkm_runl {};

struct nvkm_runl *nvkm_runl_new(struct nvkm_fifo *, int runi, u32 addr, int id_nr);
struct nvkm_runl *nvkm_runl_get(struct nvkm_fifo *, int runi, u32 addr);
struct nvkm_engn *nvkm_runl_add(struct nvkm_runl *, int engi, const struct nvkm_engn_func *,
				enum nvkm_subdev_type, int inst);
void nvkm_runl_del(struct nvkm_runl *);
void nvkm_runl_fini(struct nvkm_runl *);
void nvkm_runl_block(struct nvkm_runl *);
void nvkm_runl_allow(struct nvkm_runl *);
void nvkm_runl_update_locked(struct nvkm_runl *, bool wait);
bool nvkm_runl_update_pending(struct nvkm_runl *);
int nvkm_runl_preempt_wait(struct nvkm_runl *);
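
/*
 * Usage sketch: a chip-specific constructor would typically create its
 * runlists and attach the engines they serve roughly as below.  The
 * runi/addr/id_nr/engi/type/inst values and the nvkm_engn_func pointer
 * stand in for chip-specific data, and the ERR_PTR convention shown for
 * nvkm_runl_new() is assumed rather than guaranteed by this header.
 *
 *	struct nvkm_runl *runl;
 *
 *	runl = nvkm_runl_new(fifo, runi, addr, id_nr);
 *	if (IS_ERR(runl))
 *		return PTR_ERR(runl);
 *
 *	nvkm_runl_add(runl, engi, func, type, inst);
 *
 * As the names suggest, nvkm_runl_block()/nvkm_runl_allow() bracket runlist
 * scheduling, nvkm_runl_update_locked() rebuilds and submits the runlist
 * (optionally waiting, see also nvkm_runl_update_pending()), and
 * nvkm_runl_preempt_wait() waits for a pending preemption to complete.
 */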

void nvkm_runl_rc_engn(struct nvkm_runl *, struct nvkm_engn *);
void nvkm_runl_rc_cgrp(struct nvkm_cgrp *);

struct nvkm_cgrp *nvkm_runl_cgrp_get_cgid(struct nvkm_runl *, int cgid, unsigned long *irqflags);
struct nvkm_chan *nvkm_runl_chan_get_chid(struct nvkm_runl *, int chid, unsigned long *irqflags);
struct nvkm_chan *nvkm_runl_chan_get_inst(struct nvkm_runl *, u64 inst, unsigned long *irqflags);
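
/*
 * Usage sketch: the *_get_* lookups return the cgrp/chan locked, with the
 * saved IRQ state written to *irqflags; the matching release helper is
 * assumed to be the nvkm_cgrp_put()/nvkm_chan_put() pair from cgrp.h/chan.h.
 *
 *	unsigned long irqflags;
 *	struct nvkm_chan *chan;
 *
 *	chan = nvkm_runl_chan_get_chid(runl, chid, &irqflags);
 *	if (chan) {
 *		... inspect channel state ...
 *		nvkm_chan_put(&chan, irqflags);
 *	}
 */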

#define nvkm_runl_find_engn(engn,runl,cond)

#define nvkm_runl_first(fifo)
#define nvkm_runl_foreach(runl,fifo)
#define nvkm_runl_foreach_cond(runl,fifo,cond)
#define nvkm_runl_foreach_engn(engn,runl)
#define nvkm_runl_foreach_engn_cond(engn,runl,cond)
#define nvkm_runl_foreach_cgrp(cgrp,runl)
#define nvkm_runl_foreach_cgrp_safe(cgrp,gtmp,runl)
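
/*
 * Usage sketch: the foreach/find helpers walk the fifo's runlists and each
 * runlist's engine and channel-group lists.  A fault handler, for example,
 * could map a faulting unit back to its engine and kick off recovery; the
 * engn->fault member used in the condition is assumed from the wider driver.
 *
 *	struct nvkm_runl *runl;
 *	struct nvkm_engn *engn;
 *
 *	nvkm_runl_foreach(runl, fifo) {
 *		engn = nvkm_runl_find_engn(engn, runl, engn->fault == unit);
 *		if (engn) {
 *			nvkm_runl_rc_engn(runl, engn);
 *			break;
 *		}
 *	}
 */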

#define RUNL_PRINT(r,l,p,f,a...)
#define RUNL_ERROR(r,f,a...)
#define RUNL_DEBUG(r,f,a...)
#define RUNL_TRACE(r,f,a...)
#endif