#include <linux/error-injection.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <trace/events/page_pool.h>
#include "page_pool_priv.h"
/*
 * Tunables for pool teardown and fragment accounting (expansions elided
 * in this view — see the full source for the actual values).
 * NOTE(review): names suggest DEFER_TIME/DEFER_WARN_INTERVAL pace the
 * deferred release-retry worker, and BIAS_MAX bounds the page-fragment
 * reference bias; confirm against the full file.
 */
#define DEFER_TIME …
#define DEFER_WARN_INTERVAL …
#define BIAS_MAX …
#ifdef CONFIG_PAGE_POOL_STATS
/* Per-CPU recycle-statistics storage shared by system page pools. */
static DEFINE_PER_CPU(struct page_pool_recycle_stats, pp_system_recycle_stats);
/* Stat-bump helpers; compiled to no-ops in the #else branch below. */
#define alloc_stat_inc(pool, __stat) …
#define recycle_stat_inc(pool, __stat) …
#define recycle_stat_add(pool, __stat, val) …
/* ethtool stat-name table; entries are ETH_GSTRING_LEN-sized. */
static const char pp_stats[][ETH_GSTRING_LEN] = …;
/*
 * Copy @pool's accumulated statistics into @stats.
 * NOTE(review): bool return presumably signals whether stats were
 * gathered — body elided in this view; confirm against full source.
 */
bool page_pool_get_stats(const struct page_pool *pool,
struct page_pool_stats *stats)
{ … }
EXPORT_SYMBOL(…);
/* Append the pp_stats[] string names to @data for ethtool -S. */
u8 *page_pool_ethtool_stats_get_strings(u8 *data)
{ … }
EXPORT_SYMBOL(…);
/* Number of stats exported to ethtool (presumably ARRAY_SIZE(pp_stats)). */
int page_pool_ethtool_stats_get_count(void)
{ … }
EXPORT_SYMBOL(…);
/* Serialize @stats counters into the u64 @data array for ethtool. */
u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
{ … }
EXPORT_SYMBOL(…);
#else
/* Stats support disabled: the helpers expand to nothing. */
#define alloc_stat_inc …
#define recycle_stat_inc …
#define recycle_stat_add …
#endif
/*
 * Acquire the ptr_ring producer lock (sparse-annotated via __acquires).
 * NOTE(review): the bool return presumably records whether we are in
 * softirq context, to be passed back to the unlock below — body elided
 * in this view; confirm against the full source.
 */
static bool page_pool_producer_lock(struct page_pool *pool)
__acquires(&pool->ring.producer_lock)
{ … }
/*
 * Release the producer lock taken by page_pool_producer_lock();
 * @in_softirq is the value that function returned.
 */
static void page_pool_producer_unlock(struct page_pool *pool,
bool in_softirq)
__releases(&pool->ring.producer_lock)
{ … }
/*
 * Compile-time layout assertions for struct page_pool.
 * NOTE(review): name suggests cacheline-group checks — body elided in
 * this view; confirm against the full source.
 */
static void page_pool_struct_check(void)
{ … }
/*
 * Initialize @pool from caller-supplied @params, bound to @cpuid.
 * Returns 0 on success or a negative errno (inferred from the int
 * return; body elided in this view — confirm against full source).
 */
static int page_pool_init(struct page_pool *pool,
const struct page_pool_params *params,
int cpuid)
{ … }
/* Undo page_pool_init(); releases resources the init path acquired. */
static void page_pool_uninit(struct page_pool *pool)
{ … }
/*
 * Allocate and initialize a page pool pinned to @cpuid.
 * NOTE(review): presumably returns ERR_PTR() on failure, per kernel
 * convention — body elided in this view; confirm against full source.
 */
struct page_pool *
page_pool_create_percpu(const struct page_pool_params *params, int cpuid)
{ … }
EXPORT_SYMBOL(…);
/*
 * Allocate a page pool not bound to a particular CPU.
 * NOTE(review): likely a thin wrapper over page_pool_create_percpu()
 * — body elided; confirm.
 */
struct page_pool *page_pool_create(const struct page_pool_params *params)
{ … }
EXPORT_SYMBOL(…);
/* Forward declaration: the return path is used before its definition. */
static void page_pool_return_page(struct page_pool *pool, netmem_ref netmem);
/*
 * Slow-path refill of the pool's alloc cache from the ptr_ring;
 * noinline keeps this off the fast path. Body elided in this view.
 */
static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)
{ … }
/*
 * Fast-path fetch of a cached netmem from @pool.
 * NOTE(review): presumably falls back to the ring refill above when
 * the cache is empty — body elided; confirm against full source.
 */
static netmem_ref __page_pool_get_cached(struct page_pool *pool)
{ … }
/*
 * DMA-sync @dma_sync_size bytes of @netmem for device access.
 * Body elided in this view — confirm details against full source.
 */
static void __page_pool_dma_sync_for_device(const struct page_pool *pool,
netmem_ref netmem,
u32 dma_sync_size)
{ … }
/*
 * Inline wrapper around the helper above; __always_inline suggests a
 * cheap predicate gates the sync (e.g. whether the pool requested DMA
 * syncing) — body elided; confirm.
 */
static __always_inline void
page_pool_dma_sync_for_device(const struct page_pool *pool,
netmem_ref netmem,
u32 dma_sync_size)
{ … }
/*
 * DMA-map @netmem for @pool; bool return presumably reports mapping
 * success. Body elided in this view — confirm against full source.
 */
static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem)
{ … }
/*
 * Stamp @netmem with page-pool ownership info so the return path can
 * route it back to @pool. Body elided in this view.
 */
static void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem)
{ … }
/* Clear the ownership info set above before releasing @netmem. */
static void page_pool_clear_pp_info(netmem_ref netmem)
{ … }
/*
 * Allocate a single page at the pool's configured order with @gfp.
 * Body elided in this view — confirm against full source.
 */
static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
gfp_t gfp)
{ … }
/*
 * Slow allocation path when the cache and ring are empty; noinline
 * keeps it out of the fast path. Body elided in this view.
 */
static noinline netmem_ref __page_pool_alloc_pages_slow(struct page_pool *pool,
gfp_t gfp)
{ … }
/*
 * Public allocation entry point returning a netmem_ref.
 * NOTE(review): presumably tries the cached fast path before the slow
 * path above — body elided in this view; confirm.
 */
netmem_ref page_pool_alloc_netmem(struct page_pool *pool, gfp_t gfp)
{ … }
EXPORT_SYMBOL(…);
/* struct page flavour of the allocation entry point. */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{ … }
EXPORT_SYMBOL(…);
/* Allow fault injection on the allocator for testing (per <linux/error-injection.h>). */
ALLOW_ERROR_INJECTION(…);
/* Counter-distance helper used by the inflight accounting (expansion elided). */
#define _distance(a, b) …
/*
 * Number of pages currently held outside the pool.
 * NOTE(review): @strict presumably controls whether a transiently
 * negative count warns vs. is clamped — body elided; confirm.
 */
s32 page_pool_inflight(const struct page_pool *pool, bool strict)
{ … }
/*
 * Unmap @netmem's DMA mapping before the page leaves the pool.
 * Body elided in this view — confirm against full source.
 */
static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
netmem_ref netmem)
{ … }
/*
 * Give @netmem back to the page allocator (the non-recycled path);
 * forward-declared earlier in this file. Body elided in this view.
 */
void page_pool_return_page(struct page_pool *pool, netmem_ref netmem)
{ … }
/*
 * Try to stash @netmem in the ptr_ring; bool return presumably reports
 * success. Body elided in this view — confirm against full source.
 */
static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem)
{ … }
/*
 * Try the lockless per-pool cache instead of the ring (note the
 * reversed argument order vs. the ring variant above).
 */
static bool page_pool_recycle_in_cache(netmem_ref netmem,
struct page_pool *pool)
{ … }
/*
 * Predicate: is @netmem eligible for recycling at all?
 * Body elided — criteria not visible in this view.
 */
static bool __page_pool_page_can_be_recycled(netmem_ref netmem)
{ … }
/*
 * Core put path: decide whether @netmem is recycled (cache or ring) or
 * handed back to the allocator. @allow_direct permits the lockless
 * cache; @dma_sync_size bounds the device sync. The netmem_ref return's
 * exact semantics are not visible here (body elided) — confirm against
 * the full source.
 */
static __always_inline netmem_ref
__page_pool_put_page(struct page_pool *pool, netmem_ref netmem,
unsigned int dma_sync_size, bool allow_direct)
{ … }
/*
 * Predicate: are we running in @pool's own NAPI context (making direct
 * recycling safe)? Body elided in this view — confirm.
 */
static bool page_pool_napi_local(const struct page_pool *pool)
{ … }
/*
 * Public put entry point for a netmem whose pp ref has already been
 * dropped; presumably wraps __page_pool_put_page() — body elided.
 */
void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
unsigned int dma_sync_size, bool allow_direct)
{ … }
EXPORT_SYMBOL(…);
/* struct page flavour of the put entry point above. */
void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
unsigned int dma_sync_size, bool allow_direct)
{ … }
EXPORT_SYMBOL(…);
/*
 * Bulk put: release @count entries from @data back to @pool in one
 * call (amortizes producer-lock cost — presumed; body elided, confirm
 * against full source).
 */
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
int count)
{ … }
EXPORT_SYMBOL(…);
/*
 * Drain the remaining fragment references on @netmem; return-value
 * semantics not visible here (body elided) — confirm against full
 * source.
 */
static netmem_ref page_pool_drain_frag(struct page_pool *pool,
netmem_ref netmem)
{ … }
/* Release the pool's currently-held fragment page, if any. */
static void page_pool_free_frag(struct page_pool *pool)
{ … }
/*
 * Allocate a @size-byte fragment from the pool's current frag page,
 * writing the fragment's offset within the page to *@offset.
 * Body elided in this view — confirm details against full source.
 */
netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool,
unsigned int *offset, unsigned int size,
gfp_t gfp)
{ … }
EXPORT_SYMBOL(…);
/* struct page flavour of the fragment allocator above. */
struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
unsigned int size, gfp_t gfp)
{ … }
EXPORT_SYMBOL(…);
/*
 * Drain all entries left in the ptr_ring during shutdown.
 * Body elided in this view.
 */
static void page_pool_empty_ring(struct page_pool *pool)
{ … }
/*
 * Final teardown once no pages remain inflight; frees @pool itself.
 * NOTE(review): inferred from name/position in the shutdown path —
 * body elided; confirm against full source.
 */
static void __page_pool_destroy(struct page_pool *pool)
{ … }
/*
 * One-shot drain of the alloc cache during shutdown ("once" suggests
 * it guards against repeat invocation — body elided; confirm).
 */
static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{ … }
/* Scrub remaining pool state (cache + ring) on the release path. */
static void page_pool_scrub(struct page_pool *pool)
{ … }
/*
 * Attempt to release @pool; the int return presumably reports the
 * remaining inflight count so callers know whether to retry — body
 * elided in this view; confirm against full source.
 */
static int page_pool_release(struct page_pool *pool)
{ … }
/*
 * Deferred-work callback that retries the release while pages are
 * still inflight (see DEFER_TIME/DEFER_WARN_INTERVAL above — presumed;
 * bodies elided).
 */
static void page_pool_release_retry(struct work_struct *wq)
{ … }
/*
 * Register @pool with the XDP memory model: record @mem and the
 * @disconnect callback invoked on teardown. Body elided in this view.
 */
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
const struct xdp_mem_info *mem)
{ … }
/*
 * Turn off lockless (NAPI-context) direct recycling for @pool, e.g.
 * ahead of destruction. Body elided in this view — confirm ordering
 * requirements against the full source.
 */
void page_pool_disable_direct_recycling(struct page_pool *pool)
{ … }
EXPORT_SYMBOL(…);
/*
 * Public destruction entry point; presumably scrubs the pool and
 * defers final free until inflight pages drain (see the release/retry
 * helpers above) — body elided in this view; confirm.
 */
void page_pool_destroy(struct page_pool *pool)
{ … }
EXPORT_SYMBOL(…);
/*
 * Re-target @pool's page allocations to NUMA node @new_nid.
 * Body elided in this view — confirm against full source.
 */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{ … }
EXPORT_SYMBOL(…);