linux/include/net/gro.h

/* SPDX-License-Identifier: GPL-2.0-or-later */

#ifndef _NET_GRO_H
#define _NET_GRO_H

#include <linux/indirect_call_wrapper.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/hotdata.h>

/* Per-packet GRO state, stored in skb->cb and accessed via NAPI_GRO_CB(). */
struct napi_gro_cb {};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

#define GRO_RECURSION_LIMIT 15
static inline int gro_recursion_inc_test(struct sk_buff *skb)
{
	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}

typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
					       struct list_head *head,
					       struct sk_buff *skb)
{}

typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
					    struct sk_buff *);
static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
						  struct sock *sk,
						  struct list_head *head,
						  struct sk_buff *skb)
{}

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(const struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline bool skb_gro_may_pull(const struct sk_buff *skb,
				    unsigned int hlen)
{
	return likely(hlen <= NAPI_GRO_CB(skb)->frag0_len);
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{}

static inline void *skb_gro_header(struct sk_buff *skb, unsigned int hlen,
				   unsigned int offset)
{}
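
/*
 * Illustrative sketch (not part of this header; "struct foohdr_example" and
 * "foo_inner_gro_receive" are made-up names): the canonical header access
 * pattern inside an encapsulation ->gro_receive() callback, followed by a
 * recursion-limited handoff to the inner protocol.
 */
struct foohdr_example {
	__be16	proto;
	__be16	len;
};

static inline struct sk_buff *
foo_gro_receive_example(struct list_head *head, struct sk_buff *skb,
			gro_receive_t foo_inner_gro_receive)
{
	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(struct foohdr_example);
	const struct foohdr_example *fh;

	/* Fast path via frag0 if possible, otherwise pull the header. */
	fh = skb_gro_header(skb, hlen, off);
	if (unlikely(!fh)) {
		/* Header not reachable: ask the GRO core to flush. */
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	/* Hand off to the inner handler with the recursion limit enforced. */
	return call_gro_receive(foo_inner_gro_receive, head, skb);
}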

static inline int skb_gro_receive_network_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark];
}

static inline void *skb_gro_network_header(const struct sk_buff *skb)
{}

static inline __wsum inet_gro_compute_pseudo(const struct sk_buff *skb,
					     int proto)
{}

static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					const void *start, unsigned int len)
{}
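
/*
 * Illustrative sketch (hypothetical wrapper): after parsing an encapsulation
 * header, a handler advances the GRO offset past it and feeds the skipped
 * bytes back into the checksum state so a later CHECKSUM_COMPLETE
 * validation still adds up.
 */
static inline void foo_gro_pull_header_example(struct sk_buff *skb,
					       const void *hdr,
					       unsigned int hlen)
{
	skb_gro_pull(skb, hlen);
	skb_gro_postpull_rcsum(skb, hdr, hlen);
}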

/* GRO checksum functions. These are logical equivalents of the normal
 * checksum functions (in skbuff.h) except that they operate on the GRO
 * offsets and fields in sk_buff.
 */

__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);

static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}

static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
						      bool zero_okay,
						      __sum16 check)
{}

static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
							   __wsum psum)
{}

static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
{}

#define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
				    compute_pseudo)

#define skb_gro_checksum_validate(skb, proto, compute_pseudo)

#define skb_gro_checksum_validate_zero_check(skb, proto, check,		\
					     compute_pseudo)

#define skb_gro_checksum_simple_validate(skb)
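
/*
 * Illustrative sketch (modeled on the IPv4 transport offload handlers; the
 * "foo" wrapper is hypothetical): validate the transport checksum against
 * the GRO view of the packet before trying to coalesce.  A non-zero result
 * from skb_gro_checksum_validate() means validation failed and the packet
 * must be flushed rather than merged.
 */
static inline bool foo_gro_csum_valid_example(struct sk_buff *skb)
{
	return !skb_gro_checksum_validate(skb, IPPROTO_TCP,
					  inet_gro_compute_pseudo);
}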

static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
{}

static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
					      __wsum pseudo)
{}

#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo)

struct gro_remcsum {};

static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
{}

static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
					    unsigned int off, size_t hdrlen,
					    int start, int offset,
					    struct gro_remcsum *grc,
					    bool nopartial)
{}

static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
					   struct gro_remcsum *grc)
{}
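
/*
 * Illustrative sketch (hypothetical tunnel header layout and offsets): the
 * usual remote-checksum-offload sequence in a tunnel ->gro_receive()
 * callback.  The gro_remcsum state must be initialised before use and later
 * released via skb_gro_remcsum_cleanup() or skb_gro_flush_final_remcsum()
 * (declared below) before the callback returns.
 */
static inline void *foo_gro_remcsum_example(struct sk_buff *skb, void *hdr,
					    unsigned int hdr_off, size_t hdrlen,
					    int start, int offset,
					    bool nopartial,
					    struct gro_remcsum *grc)
{
	skb_gro_remcsum_init(grc);

	/* May need to pull more header; use the returned pointer and
	 * treat NULL as "flush, do not coalesce".
	 */
	return skb_gro_remcsum_process(skb, hdr, hdr_off, hdrlen,
				       start, offset, grc, nopartial);
}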

#ifdef CONFIG_XFRM_OFFLOAD
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{}
#else
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, grc);
	skb->remcsum_offload = 0;
}
#endif
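
/*
 * Illustrative sketch (hypothetical callback tail): a typical ->gro_receive()
 * exit path funnels through skb_gro_flush_final() so the accumulated flush
 * decision reaches the GRO core even when no candidate was matched.
 */
static inline struct sk_buff *foo_gro_receive_tail_example(struct sk_buff *skb,
							   struct sk_buff *pp,
							   int flush)
{
	skb_gro_flush_final(skb, pp, flush);
	return pp;
}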

INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
							    struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
							    struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
							    struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
							    struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));

#define indirect_call_gro_receive_inet(cb, f2, f1, head, skb)

struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, struct sock *sk);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);

static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{}
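
/*
 * Illustrative sketch (hypothetical wrapper): UDP-based handlers fetch the
 * transport header through udp_gro_udphdr(); a NULL return means the header
 * is not reachable in the GRO window and the packet cannot be coalesced.
 */
static inline struct udphdr *foo_gro_get_udphdr_example(struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);

	if (unlikely(!uh))
		NAPI_GRO_CB(skb)->flush = 1;

	return uh;
}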

static inline __wsum ip6_gro_compute_pseudo(const struct sk_buff *skb,
					    int proto)
{}

static inline int inet_gro_flush(const struct iphdr *iph, const struct iphdr *iph2,
				 struct sk_buff *p, bool outer)
{}

static inline int ipv6_gro_flush(const struct ipv6hdr *iph, const struct ipv6hdr *iph2)
{}

static inline int __gro_receive_network_flush(const void *th, const void *th2,
					      struct sk_buff *p, const u16 diff,
					      bool outer)
{}

static inline int gro_receive_network_flush(const void *th, const void *th2,
					    struct sk_buff *p)
{}

int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
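
/*
 * Illustrative sketch (hypothetical and greatly simplified): once a
 * transport handler has found a same-flow candidate @p for @skb, it checks
 * that the network headers still allow coalescing and then asks
 * skb_gro_receive() to splice @skb onto @p.  A non-zero merge result means
 * @p cannot absorb @skb and must be flushed to the stack.
 */
static inline bool foo_gro_try_merge_example(struct sk_buff *p,
					     struct sk_buff *skb,
					     const void *th, const void *th2)
{
	/* @th/@th2 are the transport headers of @skb and @p respectively. */
	int flush = gro_receive_network_flush(th, th2, p);

	return !flush && skb_gro_receive(p, skb) == 0;
}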

/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
static inline void gro_normal_list(struct napi_struct *napi)
{}

/* Queue one GRO_NORMAL SKB up for list processing.  If the batch size is
 * exceeded, pass the whole batch up to the stack.
 */
static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
{}
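
/*
 * Illustrative sketch (an assumption about the calling convention; the
 * wrapper name is hypothetical): when the GRO core finishes with a packet
 * it hands it to the batching helpers above, crediting every coalesced
 * segment against the batch budget rather than counting the super-packet
 * as a single skb.
 */
static inline void foo_gro_deliver_example(struct napi_struct *napi,
					   struct sk_buff *skb)
{
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}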

/* This function is an alternative to 'inet_iif' and 'inet_sdif' for the
 * cases where we cannot rely on the fields of IPCB.
 *
 * The caller must verify that skb_valid_dst(skb) is false and that
 * skb->dev is initialized.
 * The caller must hold the RCU read lock.
 */
static inline void inet_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif)
{}
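
/*
 * Illustrative sketch ("lookup" stands in for a hypothetical socket lookup
 * helper, not a kernel function): during GRO the IP control block has not
 * been set up yet, so handlers that need a socket lookup derive the ingress
 * and L3 master device indices from skb->dev via the helper above.
 */
static inline struct sock *foo_gro_lookup_example(const struct sk_buff *skb,
						  struct sock *(*lookup)(const struct sk_buff *,
									 int iif, int sdif))
{
	int iif, sdif;

	inet_get_iif_sdif(skb, &iif, &sdif);
	return lookup(skb, iif, sdif);
}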

/* This function is an alternative to 'inet6_iif' and 'inet6_sdif' for the
 * cases where we cannot rely on the fields of IP6CB.
 *
 * The caller must verify that skb_valid_dst(skb) is false and that
 * skb->dev is initialized.
 * The caller must hold the RCU read lock.
 */
static inline void inet6_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif)
{}

struct packet_offload *gro_find_receive_by_type(__be16 type);
struct packet_offload *gro_find_complete_by_type(__be16 type);
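
/*
 * Illustrative sketch (hypothetical encapsulation whose inner protocol is
 * identified by an EtherType-style @type field): dispatching to the inner
 * protocol's registered offload callbacks, bounded by the GRO recursion
 * limit.
 */
static inline struct sk_buff *foo_gro_dispatch_inner_example(struct list_head *head,
							     struct sk_buff *skb,
							     __be16 type)
{
	struct packet_offload *ptype;

	ptype = gro_find_receive_by_type(type);
	if (!ptype) {
		/* No offload handler registered: flush instead of merging. */
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return call_gro_receive(ptype->callbacks.gro_receive, head, skb);
}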

#endif /* _NET_GRO_H */