linux/drivers/gpu/drm/msm/msm_gem.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <[email protected]>
 */

#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>
#include <drm/drm_file.h>

#include <trace/events/gpu_mem.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static dma_addr_t physaddr(struct drm_gem_object *obj)
{}

static bool use_pages(struct drm_gem_object *obj)
{}

static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
{}

static void update_ctx_mem(struct drm_file *file, ssize_t size)
{}

static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file)
{}

static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
{}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API.  Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache that, depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss) or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{}
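
/*
 * Illustrative sketch only (the real bodies are elided above, and the
 * "sketch_" names are made up): with uncached/writecombine buffers the
 * two helpers typically reduce to a dma_map_sgtable()/dma_unmap_sgtable()
 * round trip on the object's sg_table, which is where the CPU cache
 * maintenance actually happens.
 */
static void __maybe_unused sketch_sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	/* CPU caches are cleaned/invalidated as part of creating the mapping */
	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void __maybe_unused sketch_sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	/* tear the streaming mapping back down before the CPU touches the pages */
	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}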

static void update_lru_active(struct drm_gem_object *obj)
{}

static void update_lru_locked(struct drm_gem_object *obj)
{}

static void update_lru(struct drm_gem_object *obj)
{}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{}

static struct page **get_pages(struct drm_gem_object *obj)
{}

static void put_pages_vram(struct drm_gem_object *obj)
{}

static void put_pages(struct drm_gem_object *obj)
{}

static struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj,
					      unsigned madv)
{}

/*
 * Update the pin count of the object.  Must be called with the lru.lock held.
 */
void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
{}

static void pin_obj_locked(struct drm_gem_object *obj)
{}

struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{}

void msm_gem_unpin_pages_locked(struct drm_gem_object *obj)
{}

static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{}

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{}

static void del_vma(struct msm_gem_vma *vma)
{}

/*
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping.  In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{}

static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace,
		u64 range_start, u64 range_end)
{}

int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
{}

void msm_gem_unpin_locked(struct drm_gem_object *obj)
{}

/* Special unpin path for use in the fence-signaling path, avoiding the need
 * to hold the obj lock by only depending on things that are protected by
 * the LRU lock.  In particular we know that we already have backing
 * and that the object's dma_resv has the fence for the current
 * submit/job which will prevent us racing against page eviction.
 */
void msm_gem_unpin_active(struct drm_gem_object *obj)
{}

struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
					   struct msm_gem_address_space *aspace)
{}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{}

/*
 * Get the iova and pin it.  Should have a matching put.
 * Limits the iova to the specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{}

static int clear_iova(struct drm_gem_object *obj,
		      struct msm_gem_address_space *aspace)
{}

/*
 * Get the requested iova but don't pin it.  Fails if the requested iova is
 * not available.  Doesn't need a put because iovas are currently valid for
 * the life of the object.
 *
 * Setting an iova of zero will clear the vma.
 */
int msm_gem_set_iova(struct drm_gem_object *obj,
		     struct msm_gem_address_space *aspace, uint64_t iova)
{}
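
/*
 * Hypothetical caller sketch (not part of the driver): pre-assign a fixed
 * GPU address and later clear it again, per the comment above.  The
 * function name and the "fixed_iova" parameter are made up for
 * illustration; only the msm_gem_set_iova() calls come from this file.
 */
static int __maybe_unused example_fix_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t fixed_iova)
{
	/* fails if the requested address is already in use in this aspace */
	int ret = msm_gem_set_iova(obj, aspace, fixed_iova);
	if (ret)
		return ret;

	/* ... use the object at the fixed address ... */

	/* an iova of zero releases the vma again */
	return msm_gem_set_iova(obj, aspace, 0);
}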

/*
 * Unpin an iova by updating the reference counts.  The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{}
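
/*
 * Hypothetical caller sketch (not part of the driver): the pin/unpin
 * pairing described above.  The function name is made up, and "aspace"
 * would normally come from the GPU or the submitting context; only the
 * msm_gem_get_and_pin_iova()/msm_gem_unpin_iova() calls come from this
 * file.
 */
static int __maybe_unused example_use_pinned_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	uint64_t iova;
	int ret;

	ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
	if (ret)
		return ret;

	/* ... program "iova" into the hardware, submit work, wait ... */

	/* drop the pin; the iova itself stays valid for the life of the object */
	msm_gem_unpin_iova(obj, aspace);

	return 0;
}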

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{}
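
/*
 * Hypothetical caller sketch (not part of the driver): CPU access through
 * the kernel vmap, with a paired get/put.  The function name is made up,
 * and treating the msm_gem_get_vaddr() return as an error pointer is an
 * assumption about the helper.
 */
static int __maybe_unused example_cpu_write(struct drm_gem_object *obj,
		const void *data, size_t len)
{
	void *vaddr = msm_gem_get_vaddr(obj);

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, data, len);	/* assumes len <= obj->size */

	msm_gem_put_vaddr(obj);

	return 0;
}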

/* Update madvise status; returns true if the backing pages have not been
 * purged, false if they have been, or -errno on error.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{}
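
/*
 * Hypothetical caller sketch (not part of the driver): illustrates the
 * tri-state return value described above.  The function name and the
 * -ENOMEM mapping for the "already purged" case are made up for
 * illustration.
 */
static int __maybe_unused example_willneed(struct drm_gem_object *obj)
{
	int ret = msm_gem_madvise(obj, MSM_MADV_WILLNEED);

	if (ret < 0)
		return ret;	/* error */
	if (ret == 0)
		return -ENOMEM;	/* backing was already purged */

	return 0;		/* pages still present, safe to use */
}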

void msm_gem_purge(struct drm_gem_object *obj)
{}

/*
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{}

void msm_gem_vunmap(struct drm_gem_object *obj)
{}

bool msm_gem_active(struct drm_gem_object *obj)
{}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{}

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{}
#endif

/* don't call directly!  Use drm_gem_object_put() */
static void msm_gem_free_object(struct drm_gem_object *obj)
{}

static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{}

/* convenience method to construct a GEM buffer object and a userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{}

static enum drm_gem_object_status msm_gem_status(struct drm_gem_object *obj)
{}

static const struct vm_operations_struct vm_ops =;

static const struct drm_gem_object_funcs msm_gem_object_funcs =;

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{}

struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace)
{}
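
/*
 * Hypothetical caller sketch (not part of the driver): the usual pattern
 * for kernel-internal buffers, pairing msm_gem_kernel_new() with
 * msm_gem_kernel_put().  The function name, the MSM_BO_WC flag choice,
 * the SZ_4K size, and treating the return as an error pointer are
 * assumptions for illustration.
 */
static int __maybe_unused example_kernel_bo(struct drm_device *dev,
		struct msm_gem_address_space *aspace)
{
	struct drm_gem_object *bo;
	uint64_t iova;
	void *vaddr;

	vaddr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, aspace, &bo, &iova);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* ... fill *vaddr, hand "iova" to the hardware ... */

	msm_gem_kernel_put(bo, aspace);

	return 0;
}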

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{}