/* SPDX-License-Identifier: GPL-2.0-or-later */
/* linux/net/core/devmem.h */
/*
 * Device memory TCP support
 *
 * Authors:	Mina Almasry <[email protected]>
 *		Willem de Bruijn <[email protected]>
 *		Kaiyuan Zhang <[email protected]>
 *
 */
#ifndef _NET_DEVMEM_H
#define _NET_DEVMEM_H

struct netlink_ext_ack;

/* State for one dma-buf bound to a net device for device memory TCP.
 * NOTE(review): the member list appears stripped in this copy of the file
 * (an empty struct is a GNU extension, not ISO C) — confirm against the
 * full kernel source before relying on sizeof() or field access.
 */
struct net_devmem_dmabuf_binding {};

#if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk, and needs
 * this owner struct to keep track of some metadata necessary to create
 * allocations from this chunk.
 *
 * NOTE(review): the member list appears stripped in this copy — the inline
 * accessors below (net_iov_owner() etc.) also have empty bodies.
 */
struct dmabuf_genpool_chunk_owner {};

/* Final teardown of @binding (presumably once its refcount drops to zero,
 * paired with net_devmem_dmabuf_binding_put() — confirm in the .c file).
 */
void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding);
/* Bind the dma-buf referred to by @dmabuf_fd to @dev. Returns the new
 * binding, or an ERR_PTR() on failure (the !CONFIG_NET_DEVMEM stub below
 * returns ERR_PTR(-EOPNOTSUPP)).
 */
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack);
/* Release a binding obtained from net_devmem_bind_dmabuf(). */
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
/* Attach @binding to RX queue @rxq_idx of @dev. Returns 0 on success or a
 * negative errno (-EOPNOTSUPP when devmem is compiled out — see stub below).
 */
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack);
/* Remove any dma-buf bindings installed on @dev's queues. */
void dev_dmabuf_uninstall(struct net_device *dev);

/* Return the dmabuf_genpool_chunk_owner that @niov belongs to (per the
 * declared return type).
 * NOTE(review): body stripped in this extract — a non-void function with
 * an empty body is undefined behavior if called; restore the real body.
 */
static inline struct dmabuf_genpool_chunk_owner *
net_iov_owner(const struct net_iov *niov)
{}

/* Presumably the index of @niov within its owner's chunk (inferred from
 * the name — body stripped in this extract; empty body of a non-void
 * function is UB if called).
 */
static inline unsigned int net_iov_idx(const struct net_iov *niov)
{}

/* Return the net_devmem_dmabuf_binding associated with @niov (per the
 * declared return type).
 * NOTE(review): body stripped in this extract — non-void with an empty
 * body is UB if called; restore the real body.
 */
static inline struct net_devmem_dmabuf_binding *
net_iov_binding(const struct net_iov *niov)
{}

/* Presumably the dma-buf virtual address covered by @niov (inferred from
 * the name — body stripped in this extract; empty body of a non-void
 * function is UB if called). The !CONFIG_NET_DEVMEM stub returns 0.
 */
static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{}

/* Presumably the id of the binding @niov belongs to (inferred from the
 * name — body stripped in this extract; empty body of a non-void function
 * is UB if called). The !CONFIG_NET_DEVMEM stub returns 0.
 */
static inline u32 net_iov_binding_id(const struct net_iov *niov)
{}

/* Presumably takes a reference on @binding (the get/put naming suggests
 * refcounting — confirm); body stripped in this extract.
 */
static inline void
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
{}

/* Presumably drops a reference on @binding, freeing it via
 * __net_devmem_dmabuf_binding_free() when the count reaches zero
 * (inferred from the get/put naming — confirm); body stripped here.
 */
static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{}

/* Allocate one net_iov from @binding (the !CONFIG_NET_DEVMEM stub below
 * returns NULL).
 */
struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
/* Return @ppiov, obtained from net_devmem_alloc_dmabuf(), to its pool. */
void net_devmem_free_dmabuf(struct net_iov *ppiov);

#else
struct net_devmem_dmabuf_binding;

/* !CONFIG_NET_DEVMEM: no binding can exist, so teardown is a no-op. */
static inline void
__net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
{
}

/* !CONFIG_NET_DEVMEM: device memory TCP is compiled out — report it
 * to callers via ERR_PTR(-EOPNOTSUPP).
 */
static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* !CONFIG_NET_DEVMEM: binding never succeeds, so there is nothing to
 * unbind.
 */
static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

/* !CONFIG_NET_DEVMEM: binding a dma-buf to an RX queue is unsupported
 * when device memory TCP is compiled out.
 */
static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				struct net_devmem_dmabuf_binding *binding,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

/* !CONFIG_NET_DEVMEM: no bindings can be installed, so nothing to remove. */
static inline void dev_dmabuf_uninstall(struct net_device *dev)
{
}

/* !CONFIG_NET_DEVMEM: allocation always fails with NULL. */
static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}

/* !CONFIG_NET_DEVMEM: nothing is ever allocated, so nothing to free. */
static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}

/* !CONFIG_NET_DEVMEM: no device memory exists; always 0. */
static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	return 0;
}

/* !CONFIG_NET_DEVMEM: no bindings exist; always id 0. */
static inline u32 net_iov_binding_id(const struct net_iov *niov)
{
	return 0;
}
#endif

#endif /* _NET_DEVMEM_H */