// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 *  Copyright (C) 2013-2023 Eric Dumazet <[email protected]>
 *
 *  Meant to be mostly used for locally generated traffic:
 *  Fast classification depends on skb->sk being set before reaching us.
 *  If not (router workload), we use rxhash as a fallback, with a 32-bit
 *  wide hash.
 *  All packets belonging to a socket are considered a 'flow'.
 *
 *  Flows are dynamically allocated and stored in a hash table of RB trees.
 *  They are also part of one of the Round Robin 'queues' (new or old flows).
 *
 *  Burst avoidance (aka pacing) capability:
 *
 *  Transports (e.g. TCP) can set a rate in sk->sk_pacing_rate, enqueue a
 *  bunch of packets, and this packet scheduler adds delay between
 *  packets to respect the rate limitation.
 *
 *  enqueue():
 *   - Look up one RB tree (out of 1024 or more) to find the flow.
 *     If the flow does not exist, create it and add it to the tree.
 *     Add the skb to the per-flow list of skbs (FIFO).
 *   - Use a special FIFO for high-prio packets.
 *
 *  dequeue(): serves flows in Round Robin.
 *  Note: When a flow becomes empty, we do not immediately remove it from
 *  the RB trees, for performance reasons (it is expected to send additional
 *  packets, or the SLAB cache will reuse its socket for another flow).
 */
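
/* Pacing math, an illustrative sketch (not verbatim upstream code): with
 * R = sk->sk_pacing_rate in bytes per second, packets of a flow are spaced
 * so that
 *
 *	next_time_to_send = last_send_time + skb->len * NSEC_PER_SEC / R;
 *
 * fq_dequeue() holds a packet back until its time_to_send, arming a
 * watchdog timer for the earliest throttled flow instead of busy-waiting.
 */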

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/tcp.h>

struct fq_skb_cb {};

static inline struct fq_skb_cb *fq_skb_cb(struct sk_buff *skb)
{}

/*
 * Per-flow structure, dynamically allocated.
 * If packets have monotonically increasing time_to_send, they are placed
 * in O(1) in a linear list (head, tail); otherwise they are placed in an
 * rbtree (t_root).
 */
struct fq_flow {};
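
/* Abridged, illustrative sketch of the members the comment above refers to.
 * The upstream struct has more fields and careful cache-line placement;
 * take this as a reading aid, not the authoritative layout.
 *
 *	struct fq_flow {
 *		struct rb_root	t_root;		- out-of-order skbs, keyed by time_to_send
 *		struct sk_buff	*head;		- linear list of in-order skbs
 *		union {
 *			struct sk_buff *tail;	- last skb of the linear list
 *			unsigned long	age;	- (jiffies | 1UL) while detached
 *		};
 *		struct sock	*sk;
 *		int		qlen;		- packets queued in this flow
 *		int		credit;		- round robin credit (see quantum)
 *		struct fq_flow	*next;		- linkage in new/old/throttled lists
 *		u64		time_next_packet;
 *	};
 */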

struct fq_flow_head {};

struct fq_perband_flows {};

#define FQ_PRIO2BAND_CRUMB_SIZE ((TC_PRIO_MAX + 1) >> 2)

struct fq_sched_data {};

/* return the i-th 2-bit value ("crumb") */
static u8 fq_prio2band(const u8 *prio2band, unsigned int prio)
{}
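
/* Illustrative sketch of the lookup above, assuming four 2-bit crumbs are
 * packed per byte (this mirrors the idea, not necessarily the exact
 * upstream code):
 *
 *	const u8 *byte = prio2band + (prio & TC_PRIO_MAX) / 4;
 *
 *	return (*byte >> (2 * (prio & 0x3))) & 0x3;
 */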

/*
 * f->tail and f->age share the same location.
 * We can use the low-order bit to differentiate whether this location points
 * to an sk_buff or contains a jiffies value, if we force that value to be odd.
 * This relies on f->tail's low-order bit being 0, since
 * alignof(struct sk_buff) >= 2.
 */
static void fq_flow_set_detached(struct fq_flow *f)
{}

static bool fq_flow_is_detached(const struct fq_flow *f)
{}
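
/* Sketch of the encoding described above (illustrative):
 *
 *	f->age = jiffies | 1UL;		- fq_flow_set_detached()
 *	return !!(f->age & 1UL);	- fq_flow_is_detached()
 *
 * A real sk_buff pointer stored in f->tail has its low-order bit clear,
 * so it can never be mistaken for the odd jiffies marker.
 */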

/* special value to mark a throttled flow (not on old/new list) */
static struct fq_flow throttled;

static bool fq_flow_is_throttled(const struct fq_flow *f)
{}
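
/* Sketch (illustrative): a throttled flow is recognized purely by its
 * next pointer aiming at the sentinel:
 *
 *	return f->next == &throttled;
 */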

enum new_flow {
	NEW_FLOW,
	OLD_FLOW
};

static void fq_flow_add_tail(struct fq_sched_data *q, struct fq_flow *flow,
			     enum new_flow list_sel)
{}

static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
{}

static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{}

static struct kmem_cache *fq_flow_cachep __read_mostly;

/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{}
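
/* Sketch of the candidate test (illustrative): a detached flow becomes
 * collectable once it has been idle for at least FQ_GC_AGE:
 *
 *	return fq_flow_is_detached(f) &&
 *	       time_after(jiffies, f->age + FQ_GC_AGE);
 */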

static void fq_gc(struct fq_sched_data *q,
		  struct rb_root *root,
		  struct sock *sk)
{}

/* The fast path can be used if:
 * 1) The packet tstamp is in the past.
 * 2) FQ qlen == 0   OR
 *   (no flow is currently eligible for transmit,
 *    AND the fast path queue has fewer than 8 packets)
 * 3) No SO_MAX_PACING_RATE on the socket (if any).
 * 4) No @maxrate attribute on this qdisc.
 *
 * FQ cannot use the generic TCQ_F_CAN_BYPASS infrastructure.
 */
static bool fq_fastpath_check(const struct Qdisc *sch, struct sk_buff *skb,
			      u64 now)
{}
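
/* Sketch of how the numbered conditions above map to checks (illustrative
 * and simplified; field and helper names below are assumptions, not
 * verbatim upstream code):
 *
 *	if (fq_skb_cb(skb)->time_to_send > now)
 *		return false;				- (1)
 *	if (sch->q.qlen != 0 &&
 *	    (a_flow_is_eligible(q) || q->internal.qlen >= 8))
 *		return false;				- (2)
 *	if (sk && sk->sk_max_pacing_rate != ~0UL)
 *		return false;				- (3)
 *	if (q->flow_max_rate != ~0UL)
 *		return false;				- (4)
 *	return true;
 */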

static struct fq_flow *fq_classify(struct Qdisc *sch, struct sk_buff *skb,
				   u64 now)
{}

static struct sk_buff *fq_peek(struct fq_flow *flow)
{}

static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
			  struct sk_buff *skb)
{}

/* Remove one skb from flow queue.
 * This skb must be the return value of a prior fq_peek().
 */
static void fq_dequeue_skb(struct Qdisc *sch, struct fq_flow *flow,
			   struct sk_buff *skb)
{}

static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{}
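
/* Sketch of the ordering decision (illustrative): the common case of
 * monotonically increasing time_to_send stays O(1):
 *
 *	if (!flow->head ||
 *	    fq_skb_cb(skb)->time_to_send >= fq_skb_cb(flow->tail)->time_to_send)
 *		append skb to the (head, tail) linear list;
 *	else
 *		insert skb into flow->t_root, keyed by time_to_send;
 */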

static bool fq_packet_beyond_horizon(const struct sk_buff *skb,
				     const struct fq_sched_data *q, u64 now)
{}
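
/* Sketch (illustrative): a packet is beyond the horizon when its requested
 * departure time is further in the future than q->horizon allows:
 *
 *	return (s64)skb->tstamp > (s64)(now + q->horizon);
 *
 * Depending on the horizon_drop setting, fq_enqueue() then either drops
 * the packet or caps its tstamp.
 */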

static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		      struct sk_buff **to_free)
{}

static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{}

static struct fq_flow_head *fq_pband_head_select(struct fq_perband_flows *pband)
{}

static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{}
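
/* Sketch of the round robin service loop (DRR-like, illustrative and
 * heavily simplified; this is a reading aid, not the upstream logic):
 *
 *	f = head of new_flows, else head of old_flows;
 *	if (f->credit <= 0) {
 *		f->credit += q->quantum;	- refill, then defer
 *		move f to tail of old_flows, retry;
 *	}
 *	skb = fq_peek(f);
 *	if (skb and its time_to_send > now)
 *		fq_flow_set_throttled(q, f), retry;
 *	if (skb)
 *		f->credit -= qdisc_pkt_len(skb);
 */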

static void fq_flow_purge(struct fq_flow *flow)
{}

static void fq_reset(struct Qdisc *sch)
{}

static void fq_rehash(struct fq_sched_data *q,
		      struct rb_root *old_array, u32 old_log,
		      struct rb_root *new_array, u32 new_log)
{}

static void fq_free(void *addr)
{}

static int fq_resize(struct Qdisc *sch, u32 log)
{}

static const struct netlink_range_validation iq_range = {
	.max = INT_MAX,
};

static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {};

/* compress a u8 array with all elems <= 3 to an array of 2-bit fields */
static void fq_prio2band_compress_crumb(const u8 *in, u8 *out)
{}

static void fq_prio2band_decompress_crumb(const u8 *in, u8 *out)
{}
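
/* Sketch of the compression loop (illustrative): every input element is a
 * band number <= 3, so four of them fit in each output byte:
 *
 *	memset(out, 0, FQ_PRIO2BAND_CRUMB_SIZE);
 *	for (i = 0; i <= TC_PRIO_MAX; i++)
 *		out[i / 4] |= in[i] << (2 * (i & 0x3));
 *
 * Decompression reverses this with the same shift/mask arithmetic used by
 * fq_prio2band().
 */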

static int fq_load_weights(struct fq_sched_data *q,
			   const struct nlattr *attr,
			   struct netlink_ext_ack *extack)
{}

static int fq_load_priomap(struct fq_sched_data *q,
			   const struct nlattr *attr,
			   struct netlink_ext_ack *extack)
{}

static int fq_change(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{}

static void fq_destroy(struct Qdisc *sch)
{}

static int fq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{}

static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{}

static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{}

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
	.id		=	"fq",
	.priv_size	=	sizeof(struct fq_sched_data),

	.enqueue	=	fq_enqueue,
	.dequeue	=	fq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_init,
	.reset		=	fq_reset,
	.destroy	=	fq_destroy,
	.change		=	fq_change,
	.dump		=	fq_dump,
	.dump_stats	=	fq_dump_stats,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("fq");

static int __init fq_module_init(void)
{}

static void __exit fq_module_exit(void)
{}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fair Queue Packet Scheduler (FQ)");