// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      Devmem TCP
 *
 *      Authors:	Mina Almasry <[email protected]>
 *			Willem de Bruijn <[email protected]>
 *			Kaiyuan Zhang <[email protected]>
 */

#include <linux/dma-buf.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <trace/events/page_pool.h>

#include "devmem.h"
#include "mp_dmabuf_devmem.h"
#include "page_pool_priv.h"

/* Device memory support */

/* Protected by rtnl_lock() */
static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);

static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool,
					       struct gen_pool_chunk *chunk,
					       void *not_used)
{
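	/*
	 * Hedged sketch, not the verbatim upstream body: release the
	 * per-chunk owner state allocated at bind time. The owner layout
	 * (the niovs array in particular) is an assumption based on devmem.h.
	 */
	struct dmabuf_genpool_chunk_owner *owner = chunk->owner;

	kvfree(owner->niovs);
	kfree(owner);
}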

static dma_addr_t net_devmem_get_dma_addr(const struct net_iov *niov)
{
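	/*
	 * Hedged sketch: the DMA address of a net_iov is its owning chunk's
	 * base DMA address plus its page-sized offset within that chunk.
	 * net_iov_owner()/net_iov_idx() are assumed helpers from devmem.h.
	 */
	struct dmabuf_genpool_chunk_owner *owner = net_iov_owner(niov);

	return owner->base_dma_addr +
	       ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
}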

void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
{
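	/*
	 * Hedged sketch of the final teardown, run when the last reference
	 * on the binding is dropped: free the genpool and its chunk owners,
	 * then unmap, detach and release the dma-buf. Field names
	 * (chunk_pool, attachment, sgt, bound_rxqs) are assumptions.
	 */
	size_t size, avail;

	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);

	size = gen_pool_size(binding->chunk_pool);
	avail = gen_pool_avail(binding->chunk_pool);

	if (!WARN(size != avail, "can't destroy genpool. size=%zu, avail=%zu",
		  size, avail))
		gen_pool_destroy(binding->chunk_pool);

	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  DMA_FROM_DEVICE);
	dma_buf_detach(binding->dmabuf, binding->attachment);
	dma_buf_put(binding->dmabuf);
	xa_destroy(&binding->bound_rxqs);
	kfree(binding);
}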

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
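	/*
	 * Hedged sketch: carve one PAGE_SIZE chunk of device memory out of
	 * the binding's genpool and return the net_iov covering it.
	 * gen_pool_alloc_owner() hands back the chunk owner so the right
	 * niov can be found by index; the owner layout is an assumption.
	 */
	struct dmabuf_genpool_chunk_owner *owner;
	unsigned long dma_addr;
	struct net_iov *niov;
	ssize_t offset;
	ssize_t index;

	dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE,
					(void **)&owner);
	if (!dma_addr)
		return NULL;

	offset = dma_addr - owner->base_dma_addr;
	index = offset / PAGE_SIZE;
	niov = &owner->niovs[index];

	return niov;
}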

void net_devmem_free_dmabuf(struct net_iov *niov)
{
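	/*
	 * Hedged sketch: return the net_iov's PAGE_SIZE chunk to the
	 * binding's genpool. net_iov_binding() is an assumed helper from
	 * devmem.h.
	 */
	struct net_devmem_dmabuf_binding *binding = net_iov_binding(niov);
	unsigned long dma_addr = net_devmem_get_dma_addr(niov);

	if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr,
				       PAGE_SIZE)))
		return;

	gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE);
}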

void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
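	/*
	 * Hedged sketch: detach the binding from every RX queue it was
	 * installed on, restart those queues so their page_pools stop using
	 * the dma-buf, then drop the binding reference. Any socket-side
	 * unlinking done upstream is elided here.
	 */
	struct netdev_rx_queue *rxq;
	unsigned long xa_idx;
	unsigned int rxq_idx;

	xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
		if (rxq->mp_params.mp_priv != binding)
			continue;

		rxq->mp_params.mp_priv = NULL;

		rxq_idx = get_netdev_rx_queue_index(rxq);

		WARN_ON(netdev_rx_queue_restart(binding->dev, rxq_idx));
	}

	xa_erase(&net_devmem_dmabuf_bindings, binding->id);

	net_devmem_dmabuf_binding_put(binding);
}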

int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack)
{
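	/*
	 * Hedged sketch: validate the queue index, refuse to stack memory
	 * providers, remember the queue in the binding, point the queue's
	 * memory-provider params at the binding, and restart the queue so
	 * its page_pool is recreated on top of the dma-buf.
	 */
	struct netdev_rx_queue *rxq;
	u32 xa_idx;
	int err;

	if (rxq_idx >= dev->real_num_rx_queues) {
		NL_SET_ERR_MSG(extack, "rx queue index out of range");
		return -ERANGE;
	}

	rxq = __netif_get_rx_queue(dev, rxq_idx);
	if (rxq->mp_params.mp_priv) {
		NL_SET_ERR_MSG(extack, "designated queue already memory provider enabled");
		return -EEXIST;
	}

	err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
		       GFP_KERNEL);
	if (err)
		return err;

	rxq->mp_params.mp_priv = binding;

	err = netdev_rx_queue_restart(dev, rxq_idx);
	if (err) {
		rxq->mp_params.mp_priv = NULL;
		xa_erase(&binding->bound_rxqs, xa_idx);
	}

	return err;
}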

struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack)
{
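	/*
	 * Hedged sketch of the bind path, condensed and not the verbatim
	 * upstream body: pin the dma-buf fd, attach and DMA-map it for the
	 * device, then carve the mapping's scatterlist into a genpool of
	 * PAGE_SIZE chunks with one net_iov per page. Struct layouts and
	 * helpers are assumptions based on devmem.h; extack messages and
	 * NUMA placement are elided.
	 */
	struct net_devmem_dmabuf_binding *binding;
	static u32 id_alloc_next;
	struct scatterlist *sg;
	struct dma_buf *dmabuf;
	unsigned int sg_idx, i;
	int err;

	dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
	if (!binding) {
		err = -ENOMEM;
		goto err_put_dmabuf;
	}
	binding->dev = dev;
	binding->dmabuf = dmabuf;
	xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC);
	refcount_set(&binding->ref, 1);

	err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
			      binding, xa_limit_32b, &id_alloc_next,
			      GFP_KERNEL);
	if (err < 0)
		goto err_free_binding;

	binding->attachment = dma_buf_attach(dmabuf, dev->dev.parent);
	if (IS_ERR(binding->attachment)) {
		err = PTR_ERR(binding->attachment);
		NL_SET_ERR_MSG(extack, "Failed to bind dmabuf to device");
		goto err_free_id;
	}

	binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
						       DMA_FROM_DEVICE);
	if (IS_ERR(binding->sgt)) {
		err = PTR_ERR(binding->sgt);
		NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment");
		goto err_detach;
	}

	binding->chunk_pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
	if (!binding->chunk_pool) {
		err = -ENOMEM;
		goto err_unmap;
	}

	for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) {
		struct dmabuf_genpool_chunk_owner *owner;
		dma_addr_t dma_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner) {
			err = -ENOMEM;
			goto err_free_chunks;
		}
		owner->base_dma_addr = dma_addr;
		owner->num_niovs = len / PAGE_SIZE;
		owner->binding = binding;

		err = gen_pool_add_owner(binding->chunk_pool, dma_addr,
					 dma_addr, len, NUMA_NO_NODE, owner);
		if (err) {
			kfree(owner);
			goto err_free_chunks;
		}

		owner->niovs = kvcalloc(owner->num_niovs,
					sizeof(*owner->niovs), GFP_KERNEL);
		if (!owner->niovs) {
			err = -ENOMEM;
			goto err_free_chunks;
		}
		for (i = 0; i < owner->num_niovs; i++)
			owner->niovs[i].owner = owner;
	}

	return binding;

err_free_chunks:
	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);
	gen_pool_destroy(binding->chunk_pool);
err_unmap:
	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  DMA_FROM_DEVICE);
err_detach:
	dma_buf_detach(dmabuf, binding->attachment);
err_free_id:
	xa_erase(&net_devmem_dmabuf_bindings, binding->id);
err_free_binding:
	kfree(binding);
err_put_dmabuf:
	dma_buf_put(dmabuf);
	return ERR_PTR(err);
}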

void dev_dmabuf_uninstall(struct net_device *dev)
{
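	/*
	 * Hedged sketch: called when the device is torn down; walk the RX
	 * queues that still have a dma-buf memory provider installed and
	 * forget them in the owning binding. The mp_params field name is an
	 * assumption based on netdev_rx_queue.h.
	 */
	struct net_devmem_dmabuf_binding *binding;
	struct netdev_rx_queue *rxq;
	unsigned long xa_idx;
	unsigned int i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		binding = dev->_rx[i].mp_params.mp_priv;
		if (!binding)
			continue;

		xa_for_each(&binding->bound_rxqs, xa_idx, rxq)
			if (rxq == &dev->_rx[i]) {
				xa_erase(&binding->bound_rxqs, xa_idx);
				break;
			}
	}
}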

/*** "Dmabuf devmem memory provider" ***/

int mp_dmabuf_devmem_init(struct page_pool *pool)
{
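	/*
	 * Hedged sketch: the page_pool hands the binding in via mp_priv.
	 * Reject configurations the provider cannot serve (the upstream code
	 * also checks the pool's DMA mapping/sync settings, elided here) and
	 * take a reference on the binding for the pool's lifetime.
	 */
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	if (!binding)
		return -EINVAL;

	if (pool->p.order != 0)
		return -E2BIG;

	net_devmem_dmabuf_binding_get(binding);
	return 0;
}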

netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
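	/*
	 * Hedged sketch of the provider's allocation path: hand out a
	 * net_iov from the binding instead of a host page, wrapped as a
	 * netmem_ref. page_pool_set_pp_info() is assumed to be exposed by
	 * page_pool_priv.h.
	 */
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
	struct net_iov *niov;
	netmem_ref netmem;

	niov = net_devmem_alloc_dmabuf(binding);
	if (!niov)
		return 0;

	netmem = net_iov_to_netmem(niov);

	page_pool_set_pp_info(pool, netmem);

	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
	return netmem;
}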

void mp_dmabuf_devmem_destroy(struct page_pool *pool)
{
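	/*
	 * Hedged sketch: the pool is going away; drop the reference taken in
	 * mp_dmabuf_devmem_init().
	 */
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	net_devmem_dmabuf_binding_put(binding);
}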

bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
{
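	/*
	 * Hedged sketch: net_iovs never go back to the host page allocator.
	 * Return the chunk to the dma-buf genpool and return false so the
	 * page_pool skips its normal DMA-unmap/put path. The netmem helpers
	 * used here are assumed to come from netmem.h and page_pool_priv.h.
	 */
	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
		return false;

	if (WARN_ON_ONCE(atomic_long_read(netmem_get_pp_ref_count_ref(netmem)) != 1))
		return false;

	page_pool_clear_pp_info(netmem);

	net_devmem_free_dmabuf(netmem_to_net_iov(netmem));

	return false;
}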