// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

/* Pooling of allocated pages is necessary because changing the caching
 * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
 * invalidate for those addresses.
 *
 * In addition to that, allocations from the DMA coherent API are pooled as
 * well because they are rather slow compared to alloc_pages+map.
 */

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_bo.h>

#include "ttm_module.h"

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping, with the allocation order
 *	stored in the lower bits
 */
struct ttm_pool_dma {
	…
};
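/*
 * A sketch of the vaddr encoding used by struct ttm_pool_dma (illustrative
 * only; the function bodies are elided in this listing): coherent allocations
 * are at least PAGE_SIZE aligned, so the low bits of the kernel address are
 * free to carry the allocation order:
 *
 *	dma->vaddr = (unsigned long)vaddr | order;	// store
 *	order = dma->vaddr & ~PAGE_MASK;		// recover the order
 *	vaddr = (void *)(dma->vaddr & PAGE_MASK);	// recover the address
 */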
static unsigned long page_pool_size;

MODULE_PARM_DESC(…);
module_param(page_pool_size, ulong, 0644);

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];

static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];

static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker *mm_shrinker;
static DECLARE_RWSEM(pool_shrink_rwsem);

/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	…
}

/* Reset the caching of pages of size 1 << order and free them */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	…
}

/* Apply a new caching to an array of pages */
static int ttm_pool_apply_caching(struct page **first, struct page **last,
				  enum ttm_caching caching)
{
	…
}

/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct page *p, dma_addr_t **dma_addr)
{
	…
}

/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	…
}

/* Give pages back to a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
	…
}

/* Take pages from a specific pool_type; return NULL when nothing is available */
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	…
}

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	…
}

/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
	…
}

/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
						  enum ttm_caching caching,
						  unsigned int order)
{
	…
}

/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
	…
}

/* Return the allocation order for a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
	…
}

/* Called once we have a page, either from a pool or newly allocated */
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
				   struct page *p, dma_addr_t **dma_addr,
				   unsigned long *num_pages,
				   struct page ***pages)
{
	…
}

/**
 * ttm_pool_free_range() - Free a range of TTM pages
 * @pool: The pool used for allocating.
 * @tt: The struct ttm_tt holding the page pointers.
 * @caching: The page caching mode used by the range.
 * @start_page: index for first page to free.
 * @end_page: index for last page to free + 1.
 *
 * During allocation the ttm_tt page-vector may be populated with ranges of
 * pages with different attributes if the allocation hit an error before it
 * could be completely fulfilled. This function can be used to free these
 * individual ranges.
 */
static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
				enum ttm_caching caching,
				pgoff_t start_page, pgoff_t end_page)
{
	…
}

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	…
}
EXPORT_SYMBOL(…);

/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	…
}
EXPORT_SYMBOL(…);

/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @nid: NUMA node to use for allocations
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   int nid, bool use_dma_alloc, bool use_dma32)
{
	…
}
EXPORT_SYMBOL(…);
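/*
 * Example (hypothetical driver code, not part of this file): a driver that
 * owns a standalone pool typically ties ttm_pool_init()/ttm_pool_fini() to
 * its device lifetime and routes the ttm_tt populate/unpopulate callbacks
 * through ttm_pool_alloc()/ttm_pool_free(). All mydrv_* names below are made
 * up for illustration:
 *
 *	ttm_pool_init(&mdev->pool, mdev->dev, NUMA_NO_NODE,
 *		      false, false);	// at device init
 *
 *	static int mydrv_ttm_tt_populate(struct ttm_device *bdev,
 *					 struct ttm_tt *tt,
 *					 struct ttm_operation_ctx *ctx)
 *	{
 *		return ttm_pool_alloc(&mydrv_device(bdev)->pool, tt, ctx);
 *	}
 *
 *	static void mydrv_ttm_tt_unpopulate(struct ttm_device *bdev,
 *					    struct ttm_tt *tt)
 *	{
 *		ttm_pool_free(&mydrv_device(bdev)->pool, tt);
 *	}
 *
 *	ttm_pool_fini(&mdev->pool);	// on teardown, after all tts are freed
 */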
/**
 * ttm_pool_synchronize_shrinkers - Wait for all running shrinkers to complete.
 *
 * This is useful to guarantee that all shrinker invocations have seen an
 * update, before freeing memory, similar to RCU.
 */
static void ttm_pool_synchronize_shrinkers(void)
{
	…
}

/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
	…
}
EXPORT_SYMBOL(…);

/* As long as pages are available, make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	…
}

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	…
}

#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	…
}

/* Print a nice header for the order */
static void ttm_pool_debugfs_header(struct seq_file *m)
{
	…
}

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	…
}

/* Dump the total number of allocated pages */
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
	…
}

/* Dump the information for the global pools */
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
	…
}
DEFINE_SHOW_ATTRIBUTE(…);

/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per-pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	…
}
EXPORT_SYMBOL(…);

/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
	…
}
DEFINE_SHOW_ATTRIBUTE(…);

#endif

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
	…
}

/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
	…
}
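/*
 * Example (hypothetical driver code, not part of this file): a driver can
 * expose its pool state through its own debugfs file by forwarding to
 * ttm_pool_debugfs() above; the mydrv_* names are made up for illustration:
 *
 *	static int mydrv_pool_show(struct seq_file *m, void *data)
 *	{
 *		struct mydrv_device *mdev = m->private;
 *
 *		return ttm_pool_debugfs(&mdev->pool, m);
 *	}
 *	DEFINE_SHOW_ATTRIBUTE(mydrv_pool);
 */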