/* linux/drivers/gpu/drm/omapdrm/omap_gem.c */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
 * Author: Rob Clark <[email protected]>
 */

#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>
#include <linux/vmalloc.h>

#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */

/*
 * Driver-private GEM object wrapper.
 * NOTE(review): fields elided in this stripped copy — the full driver embeds
 * the base struct drm_gem_object plus flags, page/dma-address bookkeeping,
 * and tiler-block state; confirm against the upstream source.
 */
struct omap_gem_object {};

/*
 * Convert a base drm_gem_object pointer to the driver-private wrapper.
 * Assumes struct omap_gem_object embeds its drm_gem_object as a member
 * named 'base' (as in the upstream driver) — TODO confirm, since the
 * struct definition is elided in this stripped copy.
 */
#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
/* Number of page-aligned usergart slots reserved per 2d container (see
 * the block comment above); two entries let a second mapping be set up
 * before the first is evicted. */
#define NUM_USERGART_ENTRIES 2
/* One reserved usergart slot.
 * NOTE(review): fields elided in this stripped copy — presumably holds the
 * tiler block, its dma address, and which object/offset currently maps it. */
struct omap_drm_usergart_entry {};

/* Per-format usergart state: the NUM_USERGART_ENTRIES slots plus eviction
 * bookkeeping. Fields elided in this stripped copy — confirm upstream. */
struct omap_drm_usergart {};

/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset
 *
 * Returns the fake offset userspace passes to mmap() for this object.
 * Implementation elided in this stripped copy.
 */
u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	/* Body elided in this stripped copy of the driver. */
}

/* Check whether an sg_table describes a single physically-contiguous
 * region covering @size bytes. Implementation elided in this stripped copy. */
static bool omap_gem_sgt_is_contiguous(struct sg_table *sgt, size_t size)
{
	/* Body elided in this stripped copy of the driver. */
}

/* Check whether the object's backing memory is physically contiguous
 * (e.g. dma_alloc or a contiguous imported dmabuf). Implementation elided
 * in this stripped copy. */
static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
{
	/* Body elided in this stripped copy of the driver. */
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

/* Tear down one usergart entry's userspace mapping for @obj at tiler
 * format @fmt. Implementation elided in this stripped copy. */
static void omap_gem_evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	/* Body elided in this stripped copy of the driver. */
}

/* Evict a buffer from usergart, if it is mapped there */
static void omap_gem_evict(struct drm_gem_object *obj)
{}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/*
 * Ensure backing pages are allocated. Must be called with the omap_obj.lock
 * held.
 */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{}

/* Release backing pages. Must be called with the omap_obj.lock held. */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{}

/* get buffer flags (the OMAP_BO_* flags the object was created with).
 * Implementation elided in this stripped copy. */
u32 omap_gem_flags(struct drm_gem_object *obj)
{
	/* Body elided in this stripped copy of the driver. */
}

/** get mmap size
 *
 * Size userspace may map; for tiled buffers this can exceed obj->size
 * (presumably rounded for the tiler layout — confirm upstream).
 * Implementation elided in this stripped copy.
 */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	/* Body elided in this stripped copy of the driver. */
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers.
 * Implementation elided in this stripped copy. */
static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* Body elided in this stripped copy of the driver. */
}

/* Special handling for the case of faulting in 2d tiled buffers: maps the
 * faulting region through a usergart slot (see the usergart comment above).
 * Implementation elided in this stripped copy. */
static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* Body elided in this stripped copy of the driver. */
}

/**
 * omap_gem_fault		-	pagefault handler for GEM objects
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 *
 * Presumably dispatches to omap_gem_fault_1d() or omap_gem_fault_2d()
 * depending on the buffer layout — implementation elided in this
 * stripped copy.
 */
static vm_fault_t omap_gem_fault(struct vm_fault *vmf)
{
	/* Body elided in this stripped copy of the driver. */
}

static int omap_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create	-	create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{}

/**
 * omap_gem_dumb_map_offset - create an offset for a dumb buffer
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: memory map offset placeholder
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 *
 * Returns 0 on success or a negative error code (implementation elided
 * in this stripped copy).
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		u32 handle, u64 *offset)
{
	/* Body elided in this stripped copy of the driver. */
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
{}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/*
 * shmem buffers that are mapped cached are not coherent.
 *
 * We keep track of dirty pages using page faulting to perform cache management.
 * When a page is mapped to the CPU in read/write mode the device can't access
 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
 * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
 * unmapped from the CPU.
 */
static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
{
	/* Body elided in this stripped copy of the driver. */
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
{}

/* sync the buffer for DMA access in direction @dir.
 * Implementation elided in this stripped copy. */
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	/* Body elided in this stripped copy of the driver. */
}

static int omap_gem_pin_tiler(struct drm_gem_object *obj)
{}

/**
 * omap_gem_pin() - Pin a GEM object in memory
 * @obj: the GEM object
 * @dma_addr: the DMA address
 *
 * Pin the given GEM object in memory and fill the dma_addr pointer with the
 * object's DMA address. If the buffer is not physically contiguous it will be
 * remapped through the TILER to provide a contiguous view.
 *
 * Pins are reference-counted, calling this function multiple times is allowed
 * as long the corresponding omap_gem_unpin() calls are balanced.
 *
 * Return 0 on success or a negative error code otherwise.
 */
int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
{
	/* Body elided in this stripped copy of the driver. */
}

/**
 * omap_gem_unpin_locked() - Unpin a GEM object from memory
 * @obj: the GEM object
 *
 * omap_gem_unpin() without locking.
 */
static void omap_gem_unpin_locked(struct drm_gem_object *obj)
{}

/**
 * omap_gem_unpin() - Unpin a GEM object from memory
 * @obj: the GEM object
 *
 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
 * reference-counted, the actual unpin will only be performed when the number
 * of calls to this function matches the number of calls to omap_gem_pin().
 */
void omap_gem_unpin(struct drm_gem_object *obj)
{}

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 *
 * Implementation elided in this stripped copy.
 */
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
		int x, int y, dma_addr_t *dma_addr)
{
	/* Body elided in this stripped copy of the driver. */
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers).
 * Implementation elided in this stripped copy. */
int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
{
	/* Body elided in this stripped copy of the driver. */
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_pin().  Note though that mutex is not
 * aquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_unpin() should be changed to work in the
 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 *
 * Implementation elided in this stripped copy.
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	/* Body elided in this stripped copy of the driver. */
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{}

/* Build (or return an existing) scatter/gather table describing the object's
 * backing memory, synced for DMA in direction @dir. Pair with
 * omap_gem_put_sg(). Implementation elided in this stripped copy. */
struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	/* Body elided in this stripped copy of the driver. */
}

void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt)
{}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/*
 * Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct drm_device *dev)
{}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{}
#endif

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

static void omap_gem_free_object(struct drm_gem_object *obj)
{}

/* Validate userspace-supplied OMAP_BO_* creation flags against what this
 * device supports. Returns true if acceptable. Implementation elided in
 * this stripped copy. */
static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
{
	/* Body elided in this stripped copy of the driver. */
}

static const struct vm_operations_struct omap_gem_vm_ops =;

static const struct drm_gem_object_funcs omap_gem_object_funcs =;

/* GEM buffer object constructor: allocate and initialize an object of the
 * given size/flags. Returns the new object or NULL on failure — TODO confirm
 * error convention upstream. Implementation elided in this stripped copy. */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, u32 flags)
{
	/* Body elided in this stripped copy of the driver. */
}

/* Construct a GEM object backed by an imported dma-buf described by @sgt.
 * Implementation elided in this stripped copy. */
struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
					   struct sg_table *sgt)
{
	/* Body elided in this stripped copy of the driver. */
}

/* convenience method to construct a GEM buffer object, and userspace handle.
 * Returns 0 on success with *handle filled in, or a negative error code —
 * implementation elided in this stripped copy. */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, u32 flags, u32 *handle)
{
	/* Body elided in this stripped copy of the driver. */
}

/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{}

void omap_gem_deinit(struct drm_device *dev)
{}