// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <[email protected]>
 *                   Takashi Iwai <[email protected]>
 * 
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>

/* callback table used for each buffer type below */
struct snd_malloc_ops {
	void *(*alloc)(struct snd_dma_buffer *dmab, size_t size);
	void (*free)(struct snd_dma_buffer *dmab);
	dma_addr_t (*get_addr)(struct snd_dma_buffer *dmab, size_t offset);
	struct page *(*get_page)(struct snd_dma_buffer *dmab, size_t offset);
	unsigned int (*get_chunk_size)(struct snd_dma_buffer *dmab,
				       unsigned int ofs, unsigned int size);
	int (*mmap)(struct snd_dma_buffer *dmab, struct vm_area_struct *area);
	void (*sync)(struct snd_dma_buffer *dmab, enum snd_dma_sync_mode mode);
};

#define DEFAULT_GFP \
	(GFP_KERNEL | \
	 __GFP_RETRY_MAYFAIL | /* don't trigger OOM-killer */ \
	 __GFP_NOWARN)   /* no stack trace print - this call is non-critical */

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return NULL;
	return ops->alloc(dmab, size);
}

/**
 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
 *	type and direction
 * @type: the DMA buffer type
 * @device: the device pointer
 * @dir: DMA direction
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_dir_pages(int type, struct device *device,
			    enum dma_data_direction dir, size_t size,
			    struct snd_dma_buffer *dmab)
{
	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->dev.dir = dir;
	dmab->bytes = 0;
	dmab->addr = 0;
	dmab->private_data = NULL;
	dmab->area = __snd_dma_alloc_pages(dmab, size);
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_dir_pages);
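
/*
 * Usage sketch (illustration only, not part of the ALSA core): a typical
 * caller allocates a device-coherent playback buffer as below; the device
 * pointer would come from the driver's probe routine.  example_alloc() is
 * a hypothetical helper name.
 */
static int __maybe_unused example_alloc(struct device *dev,
					struct snd_dma_buffer *buf)
{
	/* 64 KiB of coherent pages, DMA'd from CPU to the device */
	return snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, dev,
				       DMA_TO_DEVICE, 64 * 1024, buf);
}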

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
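
/*
 * Usage sketch (illustration only): with the fallback variant the caller
 * must read back dmab->bytes, since less than the requested size may have
 * been granted.  example_alloc_fallback() is a hypothetical helper name.
 */
static ssize_t __maybe_unused example_alloc_fallback(struct device *dev,
						     struct snd_dma_buffer *buf)
{
	int err;

	err = snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, dev,
					   1024 * 1024, buf);
	if (err < 0)
		return err;
	return buf->bytes;	/* the size actually allocated */
}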

/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/* called by devres */
static void __snd_release_pages(struct device *dev, void *res)
{
	snd_dma_free_pages(res);
}

/**
 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @dir: DMA direction
 * @size: the buffer size to allocate
 *
 * Allocate buffer pages depending on the given type and manage using devres.
 * The pages will be released automatically at the device removal.
 *
 * Unlike snd_dma_alloc_pages(), this function requires the real device pointer,
 * hence it can't work with the SNDRV_DMA_TYPE_CONTINUOUS or
 * SNDRV_DMA_TYPE_VMALLOC types.
 *
 * Return: the snd_dma_buffer object on success, or NULL on failure
 */
struct snd_dma_buffer *
snd_devm_alloc_dir_pages(struct device *dev, int type,
			 enum dma_data_direction dir, size_t size)
{
	struct snd_dma_buffer *dmab;
	int err;

	if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
		    type == SNDRV_DMA_TYPE_VMALLOC))
		return NULL;

	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return NULL;

	err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
	if (err < 0) {
		devres_free(dmab);
		return NULL;
	}

	devres_add(dev, dmab);
	return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
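
/*
 * Usage sketch (illustration only): the devres-managed variant ties the
 * buffer lifetime to the device, so no explicit snd_dma_free_pages() is
 * needed.  example_devm_alloc() is a hypothetical helper name.
 */
static int __maybe_unused example_devm_alloc(struct device *dev)
{
	struct snd_dma_buffer *buf;

	buf = snd_devm_alloc_dir_pages(dev, SNDRV_DMA_TYPE_DEV,
				       DMA_BIDIRECTIONAL, 32 * 1024);
	if (!buf)
		return -ENOMEM;
	/* buf->area (CPU) and buf->addr (DMA) stay valid until removal */
	return 0;
}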

/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops;

	if (!dmab)
		return -ENOENT;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);
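
/*
 * Usage sketch (illustration only): a driver-side mmap handler can
 * delegate straight to snd_dma_buffer_mmap(); the per-type ops apply the
 * right page protection.  The size check and example_mmap() name are
 * illustrative additions, not from the original file.
 */
static int __maybe_unused example_mmap(struct snd_dma_buffer *buf,
				       struct vm_area_struct *area)
{
	if (area->vm_end - area->vm_start > buf->bytes)
		return -EINVAL;	/* don't map beyond the allocated buffer */
	return snd_dma_buffer_mmap(buf, area);
}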

#ifdef CONFIG_HAS_DMA
/**
 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
 * @dmab: buffer allocation information
 * @mode: sync mode
 */
void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
			 enum snd_dma_sync_mode mode)
{
	const struct snd_malloc_ops *ops;

	if (!dmab || !dmab->dev.need_sync)
		return;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->sync)
		ops->sync(dmab, mode);
}
EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
#endif /* CONFIG_HAS_DMA */
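
/*
 * Usage sketch (illustration only): for buffer types that need explicit
 * syncing (e.g. SNDRV_DMA_TYPE_NONCOHERENT), a driver brackets CPU
 * accesses as below; the call is a no-op when dmab->dev.need_sync is
 * false.  example_cpu_read() is a hypothetical helper name.
 */
static void __maybe_unused example_cpu_read(struct snd_dma_buffer *buf,
					    void *dst, size_t bytes)
{
	snd_dma_buffer_sync(buf, SNDRV_DMA_SYNC_CPU);    /* before CPU reads */
	memcpy(dst, buf->area, bytes);
	snd_dma_buffer_sync(buf, SNDRV_DMA_SYNC_DEVICE); /* hand back to HW */
}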

/**
 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the physical address
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	else
		return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);

/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the page pointer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	else
		return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);

/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size of continuous pages
 *	in an sg-buffer
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 *
 * Return: the chunk size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	else
		return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
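
/*
 * Usage sketch (illustration only): drivers programming hardware SG
 * tables walk the buffer in maximal physically-continuous chunks as
 * below.  example_walk_chunks() is a hypothetical helper, and the
 * commented program_hw_entry() stands in for device-specific setup.
 */
static void __maybe_unused example_walk_chunks(struct snd_dma_buffer *buf,
					       unsigned int size)
{
	unsigned int ofs = 0;

	while (ofs < size) {
		unsigned int chunk = snd_sgbuf_get_chunk_size(buf, ofs,
							      size - ofs);
		dma_addr_t addr;

		if (!chunk)
			break;	/* no continuous chunk found; bail out */
		addr = snd_sgbuf_get_addr(buf, ofs);
		/* program_hw_entry(addr, chunk): hypothetical HW hook */
		(void)addr;
		ofs += chunk;
	}
}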

/*
 * Continuous pages allocator
 */
static void *do_alloc_pages(struct device *dev, size_t size, dma_addr_t *addr,
			    bool wc)
{}

static void do_free_pages(void *p, size_t size, bool wc)
{
#ifdef CONFIG_X86
	if (wc)
		set_memory_wb((unsigned long)p, size >> PAGE_SHIFT);
#endif
	free_pages_exact(p, size);
}

static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, false);
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	do_free_pages(dmab->area, dmab->bytes, false);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};

/*
 * VMALLOC allocator
 */
static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return vmalloc(size);
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

#define get_vmalloc_page_addr(dmab, offset) \
	page_to_phys(vmalloc_to_page((dmab)->area + (offset)))

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif /* CONFIG_GENERIC_ALLOCATOR */

/*
 * Coherent device pages allocator
 */
static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};

/*
 * Write-combined pages
 */
#ifdef CONFIG_SND_DMA_SGBUF
/* x86-specific allocations */
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{}
#else
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	return dma_mmap_wc(dmab->dev.dev, area,
			   dmab->area, dmab->addr, dmab->bytes);
}
#endif

static const struct snd_malloc_ops snd_dma_wc_ops = {
	.alloc = snd_dma_wc_alloc,
	.free = snd_dma_wc_free,
	.mmap = snd_dma_wc_mmap,
};

/*
 * Non-contiguous pages allocator
 */
static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
{}

static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
{
	dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
	dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
			       dmab->dev.dir);
}

static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
				  struct vm_area_struct *area)
{
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
				   enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir == DMA_TO_DEVICE)
			return;
		invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
					 dmab->dev.dir);
	} else {
		if (dmab->dev.dir == DMA_FROM_DEVICE)
			return;
		flush_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
					    dmab->dev.dir);
	}
}

static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
					      struct sg_page_iter *piter,
					      size_t offset)
{
	struct sg_table *sgt = dmab->private_data;

	__sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
			     offset >> PAGE_SHIFT);
}

static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	struct sg_dma_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
	__sg_page_iter_dma_next(&iter);
	return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
}

static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
					       size_t offset)
{
	struct sg_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter, offset);
	__sg_page_iter_next(&iter);
	return sg_page_iter_page(&iter);
}

static unsigned int
snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
				 unsigned int ofs, unsigned int size)
{}

static const struct snd_malloc_ops snd_dma_noncontig_ops = {
	.alloc = snd_dma_noncontig_alloc,
	.free = snd_dma_noncontig_free,
	.mmap = snd_dma_noncontig_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

#ifdef CONFIG_SND_DMA_SGBUF
/* Fallback SG-buffer allocations for x86 */
struct snd_dma_sg_fallback {
	struct sg_table sgt;	/* used by get_addr - must be the first item */
	size_t count;
	struct page **pages;
	unsigned int *npages;
};

static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
				       struct snd_dma_sg_fallback *sgbuf)
{}

/* fallback manual S/G buffer allocations */
static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
{}

static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
{
	__snd_dma_sg_fallback_free(dmab, dmab->private_data);
}

static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return vm_map_pages(area, sgbuf->pages, sgbuf->count);
}

static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
{}

static const struct snd_malloc_ops snd_dma_sg_ops = {
	.alloc = snd_dma_sg_alloc,
	.free = snd_dma_sg_fallback_free,
	.mmap = snd_dma_sg_fallback_mmap,
	/* reuse noncontig helper */
	.get_addr = snd_dma_noncontig_get_addr,
	/* reuse vmalloc helpers */
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
#endif /* CONFIG_SND_DMA_SGBUF */

/*
 * Non-coherent pages allocator
 */
static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p;

	p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
				  dmab->dev.dir, DEFAULT_GFP);
	if (p)
		dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
	return p;
}

static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
{
	dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
			     dmab->addr, dmab->dev.dir);
}

static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	area->vm_page_prot = vm_get_page_prot(area->vm_flags);
	return dma_mmap_pages(dmab->dev.dev, area,
			      area->vm_end - area->vm_start,
			      virt_to_page(dmab->area));
}

static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
				     enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir != DMA_TO_DEVICE)
			dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
						dmab->bytes, dmab->dev.dir);
	} else {
		if (dmab->dev.dir != DMA_FROM_DEVICE)
			dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
						   dmab->bytes, dmab->dev.dir);
	}
}

static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
	.alloc = snd_dma_noncoherent_alloc,
	.free = snd_dma_noncoherent_free,
	.mmap = snd_dma_noncoherent_mmap,
	.sync = snd_dma_noncoherent_sync,
};

#endif /* CONFIG_HAS_DMA */

/*
 * Entry points
 */
static const struct snd_malloc_ops *snd_dma_ops[] = {
	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops,
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#endif /* CONFIG_HAS_DMA */
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(!dmab))
		return NULL;
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(snd_dma_ops)))
		return NULL;
	return snd_dma_ops[dmab->dev.type];
}