/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/vmalloc.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_cache.h>

struct ttm_transfer_obj {
	…
};

int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem)
{
	…
}

void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem)
{
	…
}

/**
 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
 * @clear: Whether to clear rather than copy.
 * @num_pages: Number of pages of the operation.
 * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
 * @src_iter: A struct ttm_kmap_iter representing the source resource.
 *
 * This function is intended to be able to run asynchronously under a
 * dma-fence, if desired.
 */
void ttm_move_memcpy(bool clear,
		     u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter)
{
	…
}
EXPORT_SYMBOL(…);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: operation context
 * @dst_mem: struct ttm_resource indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, set (@dst_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @dst_mem.
 * Returns:
 * !0: Failure.
 */
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *dst_mem)
{
	…
}
EXPORT_SYMBOL(…);
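
/*
 * Usage sketch (illustrative only, not part of this file): how a driver's
 * struct ttm_device_funcs.move() callback might fall back to
 * ttm_bo_move_memcpy() when no hardware copy engine is available. The
 * function name example_bo_move() and the surrounding driver are
 * hypothetical; only the TTM call is real.
 */
static int example_bo_move(struct ttm_buffer_object *bo, bool evict,
			   struct ttm_operation_ctx *ctx,
			   struct ttm_resource *new_mem,
			   struct ttm_place *hop)
{
	/*
	 * A real driver would try a hardware blit first and use the CPU
	 * copy only as a fallback. ttm_bo_move_memcpy() maps both
	 * resources, copies (or clears) the data page by page, and
	 * finalizes the new placement on success.
	 */
	return ttm_bo_move_memcpy(bo, ctx, new_mem);
}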
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	…
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	…
}

/**
 * ttm_io_prot
 *
 * @bo: ttm buffer object
 * @res: ttm resource object
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching mode indicated by @res.
 */
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp)
{
	…
}
EXPORT_SYMBOL(…);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	…
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	…
}

/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap, to the
 * data in the buffer object. The ttm_kmap_obj_virtual function can then be
 * used to obtain a virtual address to the data.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	…
}
EXPORT_SYMBOL(…);

/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_kmap.
 */
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	…
}
EXPORT_SYMBOL(…);

/**
 * ttm_bo_vmap
 *
 * @bo: The buffer object.
 * @map: pointer to a struct iosys_map representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap or vmap, to the
 * data in the buffer object. The parameter @map returns the virtual
 * address as a struct iosys_map. Unmap the buffer with ttm_bo_vunmap().
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	…
}
EXPORT_SYMBOL(…);

/**
 * ttm_bo_vunmap
 *
 * @bo: The buffer object.
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_vmap().
 */
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	…
}
EXPORT_SYMBOL(…);
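
/*
 * Usage sketch (illustrative only): mapping the first page of a buffer
 * object with ttm_bo_kmap() to CPU-write a small amount of data, then
 * tearing the mapping down again. The caller is assumed to hold the bo
 * reservation; example_write_header() and its @hdr payload are
 * hypothetical, and @len must not exceed PAGE_SIZE since only one page
 * is mapped here.
 */
static int example_write_header(struct ttm_buffer_object *bo,
				const void *hdr, size_t len)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virt;
	int ret;

	/* Map a single page starting at page 0 of the bo. */
	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (ret)
		return ret;

	/*
	 * The returned address may be I/O memory (e.g. ioremapped VRAM),
	 * so check is_iomem and use the matching copy primitive.
	 */
	virt = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		memcpy_toio((void __iomem *)virt, hdr, len);
	else
		memcpy(virt, hdr, len);

	ttm_bo_kunmap(&map);
	return 0;
}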
static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
				 bool dst_use_tt)
{
	…
}

static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
				struct dma_fence *fence,
				bool dst_use_tt)
{
	…
}

static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
				       struct dma_fence *fence)
{
	…
}

/**
 * ttm_bo_move_accel_cleanup - cleanup helper for hw copies
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @pipeline: evictions are to be pipelined.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem)
{
	…
}
EXPORT_SYMBOL(…);

/**
 * ttm_bo_move_sync_cleanup - cleanup by waiting for the move to finish
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
 * by the caller to be idle. Typically used after memcpy buffer moves.
 */
void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
			      struct ttm_resource *new_mem)
{
	…
}
EXPORT_SYMBOL(…);

/**
 * ttm_bo_pipeline_gutting - purge the contents of a bo
 * @bo: The buffer object
 *
 * Purge the contents of a bo, async if the bo is not idle.
 * After a successful call, the bo is left unpopulated in
 * system placement. The function may wait uninterruptibly
 * for idle on OOM.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	…
}
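
/*
 * Usage sketch (illustrative only): the pattern ttm_bo_move_accel_cleanup()
 * is designed for. A driver schedules a copy on its hardware engine,
 * obtains a dma_fence for its completion, and hands that fence to TTM so
 * the old placement can live on in a ghost object until the copy signals.
 * example_engine_copy() and example_accel_move() are hypothetical driver
 * helpers; only the TTM and dma_fence calls are real.
 */
static struct dma_fence *example_engine_copy(struct ttm_buffer_object *bo,
					     struct ttm_resource *new_mem)
{
	/*
	 * Hypothetical stub: a real driver would submit a blit job here
	 * and return the job's completion fence.
	 */
	return ERR_PTR(-ENODEV);
}

static int example_accel_move(struct ttm_buffer_object *bo, bool evict,
			      struct ttm_operation_ctx *ctx,
			      struct ttm_resource *new_mem)
{
	struct dma_fence *fence;
	int ret;

	fence = example_engine_copy(bo, new_mem);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/*
	 * TTM takes its own references on the fence as needed, so drop
	 * ours after the cleanup call regardless of the outcome.
	 */
	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
	dma_fence_put(fence);
	return ret;
}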