linux/include/net/page_pool/types.h

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _NET_PAGE_POOL_TYPES_H
#define _NET_PAGE_POOL_TYPES_H

#include <linux/dma-direction.h>
#include <linux/ptr_ring.h>
#include <linux/types.h>
#include <net/netmem.h>

#define PP_FLAG_DMA_MAP
#define PP_FLAG_DMA_SYNC_DEV
#define PP_FLAG_SYSTEM_POOL

/* Allow unreadable (net_iov backed) netmem in this page_pool. Drivers setting
 * this must be able to support unreadable netmem, where netmem_address() would
 * return NULL. This flag should not be set for header page_pools.
 *
 * If the driver sets PP_FLAG_ALLOW_UNREADABLE_NETMEM, it should also set
 * page_pool_params.slow.queue_idx.
 */
#define PP_FLAG_ALLOW_UNREADABLE_NETMEM
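
/* Illustrative sketch, not part of this header: a driver that sets
 * PP_FLAG_ALLOW_UNREADABLE_NETMEM must tolerate buffers that have no kernel
 * mapping (and should also set page_pool_params.slow.queue_idx, as noted
 * above).  The function name is hypothetical; netmem_address() comes from
 * <net/netmem.h> and page_pool_alloc_netmem() is declared later in this file.
 */
static inline bool example_netmem_cpu_readable(netmem_ref netmem)
{
	/* net_iov backed netmem has no kernel virtual address: the CPU must
	 * never touch such a buffer in the driver; it may only be used for
	 * DMA.
	 */
	return netmem_address(netmem) != NULL;
}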

#define PP_FLAG_ALL

/*
 * Fast allocation side cache array/stack
 *
 * The cache size and refill watermark are related to the networking
 * use-case.  The NAPI budget is 64 packets.  After a NAPI poll the RX
 * ring is usually refilled, and at most 64 elements are consumed, so
 * 64 is a natural upper bound on the number of objects the cache
 * needs to hold.
 *
 * Room is kept for additional objects to support the XDP_DROP
 * use-case: XDP_DROP can recycle objects directly into this array,
 * as it runs under the same softirq/NAPI protection.  If the cache
 * were already full (or nearly so), those XDP_DROP recycles would
 * have to take a slower code path.
 */
#define PP_ALLOC_CACHE_SIZE
#define PP_ALLOC_CACHE_REFILL
struct pp_alloc_cache {};
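
/* Conceptual sketch only, not the kernel implementation: the recycling policy
 * the comment above describes, written against a hypothetical stand-in for
 * the (elided) pp_alloc_cache layout.  All names below are illustrative.
 */
struct example_alloc_cache {
	u32		count;		/* objects currently cached */
	u32		size;		/* capacity, cf. PP_ALLOC_CACHE_SIZE */
	netmem_ref	entries[];	/* flexible array for the sketch */
};

static inline bool example_cache_recycle(struct example_alloc_cache *c,
					 netmem_ref netmem)
{
	/* XDP_DROP-style direct recycle works only while there is room;
	 * once the cache is full the object has to take the slower
	 * ptr_ring path instead.
	 */
	if (c->count >= c->size)
		return false;
	c->entries[c->count++] = netmem;
	return true;
}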

/**
 * struct page_pool_params - page pool parameters
 * @fast:	params accessed frequently on hotpath
 * @order:	2^order pages on allocation
 * @pool_size:	size of the ptr_ring
 * @nid:	NUMA node id to allocate pages from
 * @dev:	device, for DMA pre-mapping purposes
 * @napi:	NAPI which is the sole consumer of pages, otherwise NULL
 * @dma_dir:	DMA mapping direction
 * @max_len:	max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
 * @offset:	DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
 * @slow:	params with slowpath access only (initialization and Netlink)
 * @netdev:	netdev this pool will serve (leave as NULL if none or multiple)
 * @queue_idx:	queue idx this page_pool is being created for.
 * @flags:	PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL,
 *		PP_FLAG_ALLOW_UNREADABLE_NETMEM.
 */
struct page_pool_params {};
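
/* Illustrative sketch, not part of this header: typical driver-side setup,
 * following the pattern in Documentation/networking/page_pool.rst.  The
 * struct body above is elided, so the field names follow the kernel-doc;
 * pool_size, max_len and the flags are example values, and NUMA_NO_NODE,
 * PAGE_SIZE and IS_ERR() come from the usual kernel headers.
 */
static inline struct page_pool *example_create_pool(struct device *dev,
						    struct napi_struct *napi)
{
	struct page_pool_params params = {
		.order		= 0,			/* single pages */
		.pool_size	= 1024,			/* ptr_ring entries */
		.nid		= NUMA_NO_NODE,
		.dev		= dev,			/* enables DMA mapping */
		.napi		= napi,			/* sole consumer, else NULL */
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,		/* sync-for-device length */
		.offset		= 0,
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
	};
	struct page_pool *pool = page_pool_create(&params);

	/* page_pool_create() returns an ERR_PTR() on failure. */
	return IS_ERR(pool) ? NULL : pool;
}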

#ifdef CONFIG_PAGE_POOL_STATS
/**
 * struct page_pool_alloc_stats - allocation statistics
 * @fast:	successful fast path allocations
 * @slow:	slow path order-0 allocations
 * @slow_high_order: slow path high order allocations
 * @empty:	ptr ring is empty, so a slow path allocation was forced
 * @refill:	an allocation which triggered a refill of the cache
 * @waive:	pages obtained from the ptr ring that cannot be added to
 *		the cache due to a NUMA mismatch
 */
struct page_pool_alloc_stats {};

/**
 * struct page_pool_recycle_stats - recycling (freeing) statistics
 * @cached:	recycling placed page in the page pool cache
 * @cache_full:	page pool cache was full
 * @ring:	page placed into the ptr ring
 * @ring_full:	page released from page pool because the ptr ring was full
 * @released_refcnt:	page released (and not recycled) because refcnt > 1
 */
struct page_pool_recycle_stats {};

/**
 * struct page_pool_stats - combined page pool use statistics
 * @alloc_stats:	see struct page_pool_alloc_stats
 * @recycle_stats:	see struct page_pool_recycle_stats
 *
 * Wrapper struct for combining page pool stats with different storage
 * requirements.
 */
struct page_pool_stats {};
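
/* Illustrative sketch, not part of this header: reading the combined stats.
 * page_pool_get_stats() is assumed to be provided by the page_pool headers
 * when CONFIG_PAGE_POOL_STATS is set; the function name and the pr_debug()
 * reporting are placeholders.
 */
static inline void example_report_stats(struct page_pool *pool)
{
	struct page_pool_stats stats = { };

	if (!page_pool_get_stats(pool, &stats))
		return;

	pr_debug("pp: fast allocs %llu, slow allocs %llu, cached recycles %llu\n",
		 stats.alloc_stats.fast, stats.alloc_stats.slow,
		 stats.recycle_stats.cached);
}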
#endif

/* The whole frag API block must stay within one cacheline. On 32-bit systems,
 * sizeof(long) == sizeof(int), so that the block size is ``3 * sizeof(long)``.
 * On 64-bit systems, the actual size is ``2 * sizeof(long) + sizeof(int)``.
 * The closest power of two to both of them is ``4 * sizeof(long)``, so just
 * use that one for simplicity.
 * Aligning it to a full cacheline boundary would be excessive and brings no
 * benefit.
 */
#define PAGE_POOL_FRAG_GROUP_ALIGN	(4 * sizeof(long))

struct pp_memory_provider_params {};

struct page_pool {};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
netmem_ref page_pool_alloc_netmem(struct page_pool *pool, gfp_t gfp);
struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
				  unsigned int size, gfp_t gfp);
netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool,
				       unsigned int *offset, unsigned int size,
				       gfp_t gfp);
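
/* Illustrative sketch, not part of this header: carving sub-page buffers with
 * the frag API.  The 2048-byte size is an example; page_address() and
 * GFP_ATOMIC come from the usual kernel headers.
 */
static inline void *example_alloc_2k_buf(struct page_pool *pool)
{
	unsigned int offset;
	struct page *page;

	page = page_pool_alloc_frag(pool, &offset, 2048, GFP_ATOMIC);
	if (!page)
		return NULL;

	/* Several fragments may share one page; each caller gets its own
	 * offset and must drop its own fragment reference when done.
	 */
	return page_address(page) + offset;
}
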
struct page_pool *page_pool_create(const struct page_pool_params *params);
struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
					  int cpuid);
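
/* Illustrative sketch, not part of this header: refilling one RX descriptor
 * from a pool created as sketched above.  This runs in the driver's
 * NAPI/softirq context, hence GFP_ATOMIC; page_pool_get_dma_addr() is
 * assumed from <net/page_pool/helpers.h> and relies on PP_FLAG_DMA_MAP.
 */
static inline struct page *example_rx_refill_one(struct page_pool *pool,
						 dma_addr_t *dma)
{
	struct page *page = page_pool_alloc_pages(pool, GFP_ATOMIC);

	if (!page)
		return NULL;

	/* With PP_FLAG_DMA_MAP the pool already mapped the page for us. */
	*dma = page_pool_get_dma_addr(page);
	return page;
}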

struct xdp_mem_info;

#ifdef CONFIG_PAGE_POOL
void page_pool_disable_direct_recycling(struct page_pool *pool);
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			   const struct xdp_mem_info *mem);
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
}

static inline void page_pool_use_xdp_mem(struct page_pool *pool,
					 void (*disconnect)(void *),
					 const struct xdp_mem_info *mem)
{
}

static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
					   int count)
{
}
#endif
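
/* Illustrative sketch, not part of this header: the two shutdown-related
 * calls above in context (CONFIG_PAGE_POOL builds).
 * page_pool_disable_direct_recycling() covers the live-reconfiguration case
 * where the pool outlives the NAPI instance it was bound to; a plain teardown
 * only needs page_pool_destroy() once the driver has returned every page it
 * still held.
 */
static inline void example_shutdown(struct page_pool *pool, bool keep_pool)
{
	if (keep_pool) {
		/* The RX queue is being rewired to another NAPI: stop
		 * lockless direct recycling into the old NAPI cache but
		 * keep the pool itself usable.
		 */
		page_pool_disable_direct_recycling(pool);
		return;
	}

	/* Pages still in flight elsewhere (e.g. sitting in socket receive
	 * queues) keep the pool's resources pinned until they come back.
	 */
	page_pool_destroy(pool);
}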

void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
				  unsigned int dma_sync_size,
				  bool allow_direct);
void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
				unsigned int dma_sync_size,
				bool allow_direct);
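
/* Illustrative sketch, not part of this header: returning a buffer from the
 * driver's own NAPI poll loop.  "Unrefed" means the caller already holds the
 * last (fragment) reference; passing -1 as dma_sync_size requests a sync of
 * the full configured max_len region when PP_FLAG_DMA_SYNC_DEV is set.
 */
static inline void example_rx_recycle(struct page_pool *pool,
				      struct page *page)
{
	/* allow_direct=true is only legal from the pool's own NAPI/softirq
	 * context; from any other context it must be false.
	 */
	page_pool_put_unrefed_page(pool, page, -1, true);
}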

static inline bool is_page_pool_compiled_in(void)
{}

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid);
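
/* Illustrative sketch, not part of this header: keeping allocations local
 * after the NAPI has been migrated to another NUMA node.  Drivers usually go
 * through the page_pool_nid_changed() helper in <net/page_pool/helpers.h>,
 * which only calls page_pool_update_nid() when the node actually changed;
 * numa_mem_id() is assumed from the usual topology headers.
 */
static inline void example_napi_poll_update_node(struct page_pool *pool)
{
	/* Safe because this runs in the pool's own NAPI poll context. */
	page_pool_update_nid(pool, numa_mem_id());
}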

#endif /* _NET_PAGE_POOL_TYPES_H */