
/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <[email protected]>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/error-injection.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool/helpers.h>
#include <net/xdp.h>

#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for put_page() */
#include <linux/poison.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

#include <trace/events/page_pool.h>

#include "page_pool_priv.h"

#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)

#define BIAS_MAX	(LONG_MAX >> 1)

#ifdef CONFIG_PAGE_POOL_STATS
static DEFINE_PER_CPU(struct page_pool_recycle_stats, pp_system_recycle_stats);

/* alloc_stat_inc is intended to be used in softirq context */
#define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
/* recycle_stat_inc is safe to use when preemption is possible. */
#define recycle_stat_inc(pool, __stat)						\
	do {									\
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
		this_cpu_inc(s->__stat);					\
	} while (0)

#define recycle_stat_add(pool, __stat, val)					\
	do {									\
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
		this_cpu_add(s->__stat, val);					\
	} while (0)

static const char pp_stats[][ETH_GSTRING_LEN] = {
	"rx_pp_alloc_fast",
	"rx_pp_alloc_slow",
	"rx_pp_alloc_slow_ho",
	"rx_pp_alloc_empty",
	"rx_pp_alloc_refill",
	"rx_pp_alloc_waive",
	"rx_pp_recycle_cached",
	"rx_pp_recycle_cache_full",
	"rx_pp_recycle_ring",
	"rx_pp_recycle_ring_full",
	"rx_pp_recycle_released_ref",
};

/**
 * page_pool_get_stats() - fetch page pool stats
 * @pool:	pool from which page was allocated
 * @stats:	struct page_pool_stats to fill in
 *
 * Retrieve statistics about the page_pool. This API is only available
 * if the kernel has been configured with ``CONFIG_PAGE_POOL_STATS=y``.
 * The caller passes a pointer to a caller-allocated struct page_pool_stats,
 * which this function fills in. The caller can then report those stats to
 * the user (perhaps via ethtool, debugfs, etc.).
 */
bool page_pool_get_stats(const struct page_pool *pool,
			 struct page_pool_stats *stats)
{}
EXPORT_SYMBOL(page_pool_get_stats);

u8 *page_pool_ethtool_stats_get_strings(u8 *data)
{}
EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings);

int page_pool_ethtool_stats_get_count(void)
{}
EXPORT_SYMBOL(page_pool_ethtool_stats_get_count);

u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
{}
EXPORT_SYMBOL(page_pool_ethtool_stats_get);
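
/* Illustrative sketch (not part of the upstream API surface): how a
 * driver's ethtool hooks might consume the helpers above. The pools[]
 * array, its length, and the example_ names are assumptions made for
 * this example only.
 */
static u64 * __maybe_unused
example_fill_pp_ethtool_stats(struct page_pool **pools, int n_pools,
			      u64 *data)
{
	struct page_pool_stats stats = {};
	int i;

	/* page_pool_get_stats() accumulates into @stats, so one struct
	 * can sum the counters of all RX-queue pools.
	 */
	for (i = 0; i < n_pools; i++)
		page_pool_get_stats(pools[i], &stats);

	/* Serialize in the same order as the pp_stats[] strings. */
	return page_pool_ethtool_stats_get(data, &stats);
}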

#else
#define alloc_stat_inc(pool, __stat)
#define recycle_stat_inc(pool, __stat)
#define recycle_stat_add(pool, __stat, val)
#endif

static bool page_pool_producer_lock(struct page_pool *pool)
	__acquires(&pool->ring.producer_lock)
{}

static void page_pool_producer_unlock(struct page_pool *pool,
				      bool in_softirq)
	__releases(&pool->ring.producer_lock)
{}

static void page_pool_struct_check(void)
{}

static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params,
			  int cpuid)
{}

static void page_pool_uninit(struct page_pool *pool)
{}

/**
 * page_pool_create_percpu() - create a page pool for a given cpu.
 * @params: parameters, see struct page_pool_params
 * @cpuid: cpu identifier
 */
struct page_pool *
page_pool_create_percpu(const struct page_pool_params *params, int cpuid)
{}
EXPORT_SYMBOL(page_pool_create_percpu);

/**
 * page_pool_create() - create a page pool
 * @params: parameters, see struct page_pool_params
 */
struct page_pool *page_pool_create(const struct page_pool_params *params)
{}
EXPORT_SYMBOL(page_pool_create);
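
/* Illustrative sketch (assumption: one pool per RX ring, driver-chosen
 * sizes). Shows typical page_pool_params wiring for a DMA-mapping pool;
 * none of the values below are mandated by this file.
 */
static struct page_pool * __maybe_unused
example_rx_ring_pool_create(struct device *dev)
{
	struct page_pool_params params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,			/* single pages */
		.pool_size	= 256,			/* assumed ring size */
		.nid		= NUMA_NO_NODE,
		.dev		= dev,			/* device doing the DMA */
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,		/* sync whole page */
		.offset		= 0,
	};

	return page_pool_create(&params);	/* ERR_PTR() on failure */
}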

static void page_pool_return_page(struct page_pool *pool, netmem_ref netmem);

static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)
{}

/* fast path */
static netmem_ref __page_pool_get_cached(struct page_pool *pool)
{}

static void __page_pool_dma_sync_for_device(const struct page_pool *pool,
					    netmem_ref netmem,
					    u32 dma_sync_size)
{}

static __always_inline void
page_pool_dma_sync_for_device(const struct page_pool *pool,
			      netmem_ref netmem,
			      u32 dma_sync_size)
{}

static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem)
{}

static void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem)
{}

static void page_pool_clear_pp_info(netmem_ref netmem)
{}

static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
						 gfp_t gfp)
{}

/* slow path */
static noinline netmem_ref __page_pool_alloc_pages_slow(struct page_pool *pool,
							gfp_t gfp)
{}

/* Use page_pool to replace alloc_pages() API calls, while providing a
 * synchronization guarantee on the allocation side.
 */
netmem_ref page_pool_alloc_netmem(struct page_pool *pool, gfp_t gfp)
{}
EXPORT_SYMBOL(page_pool_alloc_netmem);

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{}
EXPORT_SYMBOL(page_pool_alloc_pages);
ALLOW_ERROR_INJECTION(page_pool_alloc_pages, NULL);
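
/* Illustrative sketch: RX refill from NAPI/softirq context. GFP_ATOMIC
 * is the usual gfp choice there; the helper name and the -ENOMEM policy
 * are assumptions for the example.
 */
static int __maybe_unused
example_rx_refill_one(struct page_pool *pool, dma_addr_t *dma)
{
	struct page *page = page_pool_alloc_pages(pool, GFP_ATOMIC);

	if (!page)
		return -ENOMEM;

	/* With PP_FLAG_DMA_MAP set, the pool mapped the page already. */
	*dma = page_pool_get_dma_addr(page);
	return 0;
}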

/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 *  https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b)	(s32)((a) - (b))
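
/* Compile-time illustration (added for this example only) of the
 * wrap-safe property: 0x00000001 lies two steps after 0xffffffff even
 * though it is numerically smaller.
 */
static_assert(_distance(1U, 0xffffffffU) == 2);
static_assert(_distance(0xffffffffU, 1U) == -2);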

s32 page_pool_inflight(const struct page_pool *pool, bool strict)
{}

static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
							 netmem_ref netmem)
{}

/* Disconnects a page from a page_pool. API users can have a need
 * to disconnect a page, to allow it to be used as a regular page
 * (that will eventually be returned to the normal page-allocator
 * via put_page()).
 */
static void page_pool_return_page(struct page_pool *pool, netmem_ref netmem)
{}
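
/* Illustrative sketch: RX drivers normally do not return pages by hand;
 * marking the skb lets the stack recycle its page_pool pages when the
 * skb is freed. The example_ name is an assumption.
 */
static void __maybe_unused
example_rx_deliver(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_mark_for_recycle(skb);	/* pages return via their pp */
	napi_gro_receive(napi, skb);
}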

static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem)
{}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache.  E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool page_pool_recycle_in_cache(netmem_ref netmem,
				       struct page_pool *pool)
{}

static bool __page_pool_page_can_be_recycled(netmem_ref netmem)
{}

/* If the page refcnt == 1, this will try to recycle the page.
 * If pool->dma_sync is set, we'll try to sync the DMA area for
 * the configured size min(dma_sync_size, pool->max_len).
 * If the page refcnt != 1, then the page will be returned to the
 * memory subsystem.
 */
static __always_inline netmem_ref
__page_pool_put_page(struct page_pool *pool, netmem_ref netmem,
		     unsigned int dma_sync_size, bool allow_direct)
{}

static bool page_pool_napi_local(const struct page_pool *pool)
{}

void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
				  unsigned int dma_sync_size, bool allow_direct)
{}
EXPORT_SYMBOL(page_pool_put_unrefed_netmem);

void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
				unsigned int dma_sync_size, bool allow_direct)
{}
EXPORT_SYMBOL(page_pool_put_unrefed_page);
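
/* Illustrative sketch tying the rules above together: on XDP_DROP inside
 * the pool's own NAPI poll, direct recycling into the alloc cache is
 * allowed; -1 requests a DMA sync of the full configured max_len. The
 * caller is assumed to hold the last reference to the page.
 */
static void __maybe_unused
example_xdp_drop_recycle(struct page_pool *pool, struct page *page)
{
	page_pool_put_unrefed_page(pool, page, -1, true);
}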

/**
 * page_pool_put_page_bulk() - release references on multiple pages
 * @pool:	pool from which pages were allocated
 * @data:	array holding page pointers
 * @count:	number of pages in @data
 *
 * Tries to refill a number of pages into the ptr_ring cache while holding
 * the ptr_ring producer lock. If the ptr_ring is full, page_pool_put_page_bulk()
 * will release leftover pages to the page allocator.
 * page_pool_put_page_bulk() is suitable to be run inside the driver NAPI tx
 * completion loop for the XDP_REDIRECT use case.
 *
 * Please note that the caller must not use the @data array after calling
 * page_pool_put_page_bulk(), as this function overwrites it.
 */
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count)
{}
EXPORT_SYMBOL(page_pool_put_page_bulk);
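
/* Illustrative sketch: gathering Tx-completed XDP_REDIRECT pages and
 * freeing them in batches. The batch size and the gathering scheme are
 * assumptions for the example.
 */
#define EXAMPLE_BULK_SZ	16
static void __maybe_unused
example_tx_complete_bulk(struct page_pool *pool, struct page **pages,
			 int n)
{
	void *data[EXAMPLE_BULK_SZ];
	int i, cnt = 0;

	for (i = 0; i < n; i++) {
		data[cnt++] = pages[i];
		if (cnt == EXAMPLE_BULK_SZ) {
			/* May overwrite data[]; do not reuse entries. */
			page_pool_put_page_bulk(pool, data, cnt);
			cnt = 0;
		}
	}
	if (cnt)
		page_pool_put_page_bulk(pool, data, cnt);
}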

static netmem_ref page_pool_drain_frag(struct page_pool *pool,
				       netmem_ref netmem)
{}

static void page_pool_free_frag(struct page_pool *pool)
{}

netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool,
				       unsigned int *offset, unsigned int size,
				       gfp_t gfp)
{}
EXPORT_SYMBOL(page_pool_alloc_frag_netmem);

struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
				  unsigned int size, gfp_t gfp)
{}
EXPORT_SYMBOL(page_pool_alloc_frag);
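
/* Illustrative sketch: sub-page ("frag") allocation for small RX
 * buffers, e.g. two 2 KB buffers per 4 KB page. The size is an
 * assumption; @offset reports where the fragment starts in the page.
 */
static struct page * __maybe_unused
example_alloc_2k_frag(struct page_pool *pool, unsigned int *offset)
{
	return page_pool_alloc_frag(pool, offset, 2048, GFP_ATOMIC);
}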

static void page_pool_empty_ring(struct page_pool *pool)
{}

static void __page_pool_destroy(struct page_pool *pool)
{}

static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{}

static void page_pool_scrub(struct page_pool *pool)
{}

static int page_pool_release(struct page_pool *pool)
{}

static void page_pool_release_retry(struct work_struct *wq)
{}

void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			   const struct xdp_mem_info *mem)
{}

void page_pool_disable_direct_recycling(struct page_pool *pool)
{}
EXPORT_SYMBOL(page_pool_disable_direct_recycling);

void page_pool_destroy(struct page_pool *pool)
{}
EXPORT_SYMBOL(page_pool_destroy);

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{}
EXPORT_SYMBOL(page_pool_update_nid);
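
/* Illustrative sketch: a driver reacting to an IRQ affinity change so
 * future allocations come from the new NUMA node. The example_ name and
 * the trigger are assumptions.
 */
static void __maybe_unused
example_napi_affinity_changed(struct page_pool *pool, int new_nid)
{
	/* Safe because we are called from the pool's NAPI context. */
	page_pool_update_nid(pool, new_nid);
}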