linux/kernel/dma/debug.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <[email protected]>
 */

#define pr_fmt(fmt)	"DMA-API: " fmt

#include <linux/sched/task_stack.h>
#include <linux/scatterlist.h>
#include <linux/dma-map-ops.h>
#include <linux/sched/task.h>
#include <linux/stacktrace.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/sections.h>
#include "debug.h"

#define HASH_SIZE       16384ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
/* If the pool runs out, add this many new entries at once */
#define DMA_DEBUG_DYNAMIC_ENTRIES \
	(PAGE_SIZE / sizeof(struct dma_debug_entry))

/* mapping types tracked per entry; also index into type2name[] below */
enum {
	dma_debug_single,
	dma_debug_sg,
	dma_debug_coherent,
	dma_debug_resource,
};

enum map_err_types {
	MAP_ERR_CHECK_NOT_APPLICABLE,
	MAP_ERR_NOT_CHECKED,
	MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @dev_addr: dma address
 * @size: length of the mapping
 * @type: single, page, sg, coherent
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stack_len: number of backtrace entries in @stack_entries
 * @stack_entries: stack of backtrace history
 */
struct dma_debug_entry {
	struct list_head	list;
	struct device		*dev;
	u64			dev_addr;
	u64			size;
	int			type;
	int			direction;
	int			sg_call_ents;
	int			sg_mapped_ents;
	unsigned long		pfn;
	size_t			offset;
	enum map_err_types	map_err_type;
#ifdef CONFIG_STACKTRACE
	unsigned int		stack_len;
	unsigned long		stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
} ____cacheline_aligned_in_smp;

typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
};

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry structs */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
	return global_disable || !dma_debug_initialized;
}

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;

/* per-driver filter related state */

#define NAME_MAX_LEN	64

static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver                    __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *const maperr2str[] = {
	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
	[MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[] = {
	[dma_debug_single]	= "single",
	[dma_debug_sg]		= "scatter-gather",
	[dma_debug_coherent]	= "coherent",
	[dma_debug_resource]	= "resource",
};

static const char *dir2name[] = {
	[DMA_BIDIRECTIONAL]	= "DMA_BIDIRECTIONAL",
	[DMA_TO_DEVICE]		= "DMA_TO_DEVICE",
	[DMA_FROM_DEVICE]	= "DMA_FROM_DEVICE",
	[DMA_NONE]		= "DMA_NONE",
};

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs; some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		pr_warn("Mapped at:\n");
		stack_trace_print(entry->stack_entries, entry->stack_len, 0);
	}
#endif
}

static bool driver_filter(struct device *dev)
{}
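
/*
 * The filter body above is left elided. As a rough sketch of the check it
 * performs (simplified and illustrative; driver_filter_sketch() is a
 * hypothetical name, not the upstream implementation): with no filter
 * string configured every device passes; otherwise the device's driver
 * name is compared against current_driver_name under driver_name_lock,
 * and the resolved driver is cached in current_driver so later calls can
 * short-circuit on a plain pointer comparison.
 */
static __maybe_unused bool driver_filter_sketch(struct device *dev)
{
	struct device_driver *drv;
	unsigned long flags;
	bool ret = false;

	/* driver filter off: everything passes */
	if (likely(!current_driver_name[0]))
		return true;

	/* filter on and already resolved: compare pointers only */
	if (current_driver && dev && dev->driver == current_driver)
		return true;

	/* we can't filter on a NULL device or an unbound one */
	if (!dev || !dev->driver)
		return false;

	drv = dev->driver;

	/* take the lock to protect against a concurrent filter update */
	read_lock_irqsave(&driver_name_lock, flags);
	if (drv->name &&
	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
		current_driver = drv;	/* cache the match */
		ret = true;
	}
	read_unlock_irqrestore(&driver_name_lock, flags);

	return ret;
}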

#define err_printk(dev, entry, format, arg...) do {			\
		error_count += 1;					\
		if (driver_filter(dev) &&				\
		    (show_all_errors || show_num_errors > 0)) {		\
			WARN(1, pr_fmt("%s %s: ") format,		\
			     dev ? dev_driver_string(dev) : "NULL",	\
			     dev ? dev_name(dev) : "NULL", ## arg);	\
			dump_entry_trace(entry);			\
		}							\
		if (!show_all_errors && show_num_errors > 0)		\
			show_num_errors -= 1;				\
	} while (0)

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/* the entry is hashed on its dma address */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
	__acquires(&dma_entry_hash[idx].lock)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long flags)
	__releases(&bucket->lock)
{
	spin_unlock_irqrestore(&bucket->lock, flags);
}

static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
	return a->dev_addr == b->dev_addr && a->dev == b->dev;
}

static bool containing_match(struct dma_debug_entry *a,
			     struct dma_debug_entry *b)
{
	if (a->dev != b->dev)
		return false;

	/* b contains a iff b starts at or before a and ends at or after it */
	return b->dev_addr <= a->dev_addr &&
	       b->dev_addr + b->size >= a->dev_addr + a->size;
}

/*
 * Search for a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
						  struct dma_debug_entry *ref,
						  match_fn match)
{}
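
/*
 * A minimal sketch of the find loop above (illustrative;
 * __hash_bucket_find_sketch() is a hypothetical name): with the caller
 * holding the bucket lock, walk the bucket's list and hand each entry to
 * the match callback, returning the first hit. The real implementation
 * additionally scores partial matches so it can report the best candidate
 * when no entry matches exactly.
 */
static __maybe_unused struct dma_debug_entry *
__hash_bucket_find_sketch(struct hash_bucket *bucket,
			  struct dma_debug_entry *ref, match_fn match)
{
	struct dma_debug_entry *entry;

	list_for_each_entry(entry, &bucket->list, list)
		if (match(ref, entry))
			return entry;	/* first entry the callback accepts */

	return NULL;			/* nothing in this bucket matched */
}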

static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
						 struct dma_debug_entry *ref)
{
	return __hash_bucket_find(bucket, ref, exact_match);
}

static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
						   struct dma_debug_entry *ref,
						   unsigned long *flags)
{}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}

/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry.  If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings.  For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree.  In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event.  dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 */
static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)

static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
		(entry->offset >> L1_CACHE_SHIFT);
}

static int active_cacheline_read_overlap(phys_addr_t cln)
{
	int overlap = 0, i;

	/* the overlap count is kept in the entry's radix tree tag bits */
	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
			overlap |= 1 << i;
	return overlap;
}

static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
	int i;

	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
		return overlap;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) {
		if (overlap & 1 << i)
			radix_tree_tag_set(&dma_active_cacheline, cln, i);
		else
			radix_tree_tag_clear(&dma_active_cacheline, cln, i);
	}

	return overlap;
}

static void active_cacheline_inc_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	overlap = active_cacheline_set_overlap(cln, ++overlap);

	/* If we overflowed the overlap counter then we're potentially
	 * leaking dma-mappings.
	 */
	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
		  pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	return active_cacheline_set_overlap(cln, --overlap);
}

static int active_cacheline_insert(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;
	int rc;

	/* If the device is not writing memory then we don't have any
	 * concerns about the cpu consuming stale data.  This mitigates
	 * legitimate usages of overlapping mappings.
	 */
	if (entry->direction == DMA_TO_DEVICE)
		return 0;

	spin_lock_irqsave(&radix_lock, flags);
	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
	if (rc == -EEXIST)
		active_cacheline_inc_overlap(cln);
	spin_unlock_irqrestore(&radix_lock, flags);

	return rc;
}

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;

	/* ...mirror the insert case */
	if (entry->direction == DMA_TO_DEVICE)
		return;

	spin_lock_irqsave(&radix_lock, flags);
	/* since we are counting overlaps the final put of the
	 * cacheline will occur when the overlap count is 0.
	 * active_cacheline_dec_overlap() returns -1 in that case
	 */
	if (active_cacheline_dec_overlap(cln) < 0)
		radix_tree_delete(&dma_active_cacheline, cln);
	spin_unlock_irqrestore(&radix_lock, flags);
}

/*
 * Dump mapping entries to the kernel log for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{}
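
/*
 * Sketch of the dump loop above (illustrative; dump_mappings_sketch() is
 * a hypothetical name and the exact output format differs upstream): walk
 * every hash bucket under its lock and print each entry that belongs to
 * @dev, or every entry when @dev is NULL, using the type2name, dir2name
 * and maperr2str tables declared earlier.
 */
static __maybe_unused void dump_mappings_sketch(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);
		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev)
				dev_info(entry->dev,
					 "%s idx %d P=%llx D=%llx L=%llx %s %s\n",
					 type2name[entry->type], idx,
					 phys_addr(entry), entry->dev_addr,
					 entry->size,
					 dir2name[entry->direction],
					 maperr2str[entry->map_err_type]);
		}
		spin_unlock_irqrestore(&bucket->lock, flags);

		cond_resched();	/* many buckets; don't hog the cpu */
	}
}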

/*
 * Dump mapping entries to user space via debugfs
 */
static int dump_show(struct seq_file *seq, void *v)
{}
DEFINE_SHOW_ATTRIBUTE(dump);

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
{}
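
/*
 * Sketch of the wrapper above (illustrative; add_dma_entry_sketch() is a
 * hypothetical name): take the bucket lock just long enough to chain the
 * entry in, then record the mapping in the active-cacheline tree and
 * complain when the insert reports an unsupported overlap.
 */
static __maybe_unused void add_dma_entry_sketch(struct dma_debug_entry *entry,
						unsigned long attrs)
{
	struct hash_bucket *bucket;
	unsigned long flags;
	int rc;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, flags);

	rc = active_cacheline_insert(entry);
	if (rc == -ENOMEM) {
		/* no memory to track the mapping: stop debugging entirely */
		pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
		global_disable = true;
	} else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		err_printk(entry->dev, entry,
			   "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
	}
}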

static int dma_debug_create_entries(gfp_t gfp)
{}

static struct dma_debug_entry *__dma_entry_alloc(void)
{}

/*
 * This should be called outside of free_entries_lock scope to avoid potential
 * deadlocks with serial consoles that use DMA.
 */
static void __dma_entry_alloc_check_leak(u32 nr_entries)
{}

/* struct dma_debug_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entry.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{}

static void dma_entry_free(struct dma_debug_entry *entry)
{}
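
/*
 * Sketch of the allocation path above (illustrative; the _sketch name is
 * hypothetical): dma_entry_alloc() pulls an entry off free_entries under
 * free_entries_lock and grows the pool with dma_debug_create_entries()
 * when it runs dry; the leak warning is deliberately issued only after
 * the lock is dropped, per the comment above __dma_entry_alloc_check_leak().
 * dma_entry_free() simply puts the entry back and updates the counters.
 */
static __maybe_unused struct dma_debug_entry *dma_entry_alloc_sketch(void)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	bool leak_check = false;
	u32 nr_entries = 0;

	spin_lock_irqsave(&free_entries_lock, flags);
	if (num_free_entries == 0) {
		/* pool is empty: try to grow it atomically */
		if (dma_debug_create_entries(GFP_ATOMIC)) {
			global_disable = true;
			spin_unlock_irqrestore(&free_entries_lock, flags);
			pr_err("debugging out of memory - disabling\n");
			return NULL;
		}
		leak_check = true;
		nr_entries = nr_total_entries;
	}
	entry = __dma_entry_alloc();
	spin_unlock_irqrestore(&free_entries_lock, flags);

	/* warn about a possible leak only after dropping the lock */
	if (leak_check)
		__dma_entry_alloc_check_leak(nr_entries);

	return entry;
}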

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{}

static const struct file_operations filter_fops = {
	.read	= filter_read,
	.write	= filter_write,
	.open	= simple_open,
	.llseek	= default_llseek,
};

static int __init dma_debug_fs_init(void)
{}
core_initcall_sync(dma_debug_fs_init);

static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{}

static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{}

void dma_debug_add_bus(const struct bus_type *bus)
{}

static int dma_debug_init(void)
{}
core_initcall(dma_debug_init);
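
/*
 * Sketch of the init sequence above (illustrative; dma_debug_init_sketch()
 * is a hypothetical name): initialize every hash bucket, preallocate
 * nr_prealloc_entries debug entries page by page, and only then flip
 * dma_debug_initialized so the debug hooks start doing real work.
 */
static __maybe_unused int dma_debug_init_sketch(void)
{
	int i, nr_pages;

	/* respect "dma_debug=off" given on the kernel command line */
	if (global_disable)
		return 0;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	/* preallocate the requested number of entries, one page at a time */
	nr_pages = DIV_ROUND_UP(nr_prealloc_entries,
				DMA_DEBUG_DYNAMIC_ENTRIES);
	for (i = 0; i < nr_pages; ++i)
		if (dma_debug_create_entries(GFP_KERNEL))
			break;

	if (num_free_entries == 0) {
		pr_err("debugging out of memory error - disabled\n");
		global_disable = true;
		return 0;
	}

	nr_total_entries = num_free_entries;
	min_free_entries = num_free_entries;

	dma_debug_initialized = true;
	pr_info("debugging enabled by kernel config\n");
	return 0;
}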

static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		pr_info("debugging disabled on kernel command line\n");
		global_disable = true;
	}

	return 1;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	if (!str)
		return -EINVAL;
	if (!get_option(&str, &nr_prealloc_entries))
		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
	return 1;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

static void check_unmap(struct dma_debug_entry *ref)
{}

static void check_for_stack(struct device *dev,
			    struct page *page, size_t offset)
{}

static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{}

static void check_sync(struct device *dev,
		       struct dma_debug_entry *ref,
		       bool to_cpu)
{}

static void check_sg_segment(struct device *dev, struct scatterlist *sg)
{}

void debug_dma_map_single(struct device *dev, const void *addr,
			    unsigned long len)
{
	if (unlikely(dma_debug_disabled()))
		return;

	/* dma_map_single() is only valid on linearly mapped (lowmem) memory */
	if (!virt_addr_valid(addr))
		err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
			   addr, len);

	if (is_vmalloc_addr(addr))
		err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
			   addr, len);
}
EXPORT_SYMBOL(debug_dma_map_single);

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			unsigned long attrs)
{}

void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{}
EXPORT_SYMBOL(debug_dma_mapping_error);

void debug_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
			  size_t size, int direction)
{}

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction,
		      unsigned long attrs)
{}

static int get_nr_mapped_entries(struct device *dev,
				 struct dma_debug_entry *ref)
{}

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{}

void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt,
			      unsigned long attrs)
{}

void debug_dma_free_coherent(struct device *dev, size_t size,
			 void *virt, dma_addr_t dma_addr)
{}

void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
			    int direction, dma_addr_t dma_addr,
			    unsigned long attrs)
{}

void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{}

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{}

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{}

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{}

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{}

static int __init dma_debug_driver_setup(char *str)
{}
__setup();