// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 * Authors:	Alexey Kuznetsov, <[email protected]>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/siphash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/red.h>


/*	Stochastic Fairness Queuing algorithm.
	=======================================

	Source:
	Paul E. McKenney "Stochastic Fairness Queuing",
	IEEE INFOCOM'90 Proceedings, San Francisco, 1990.

	Paul E. McKenney "Stochastic Fairness Queuing",
	"Interworking: Research and Experience", v.2, 1991, p.113-131.


	See also:
	M. Shreedhar and George Varghese "Efficient Fair
	Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


	This is not the thing that is usually called (W)FQ nowadays.
	It does not use any timestamp mechanism, but instead
	processes queues in round-robin order.

	ADVANTAGE:

	- It is very cheap. Both CPU and memory requirements are minimal.

	DRAWBACKS:

	- "Stochastic" -> It is not 100% fair.
	When hash collisions occur, several flows are considered as one.

	- "Round-robin" -> It introduces larger delays than virtual clock
	based schemes, and should not be used for isolating interactive
	traffic from non-interactive. This means that this scheduler
	should be used as a leaf of CBQ or P3, which put interactive
	traffic into a higher priority band.

	We still need true WFQ for top level CSZ, but using WFQ
	for the best effort traffic is absolutely pointless:
	SFQ is superior for this purpose.

	IMPLEMENTATION:
	This implementation limits:
	- maximal queue length per flow to 127 packets;
	- maximal mtu to 2^18-1;
	- maximal number of flows to 65408;
	- number of hash buckets to 65536.

	It is easy to increase these values, but not in flight.  */
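
/*
 * A toy illustration of the idea above (not kernel code; every name in
 * this sketch is hypothetical). SFQ hashes each flow into one of a fixed
 * number of queues and serves the non-empty queues round-robin, one
 * quantum of bytes per visit:
 *
 *	#define TOY_DIVISOR 1024		// hash buckets
 *
 *	struct toy_pkt   { struct toy_pkt *next; unsigned int len; };
 *	struct toy_queue { struct toy_pkt *head, *tail; int credit; };
 *	struct toy_queue toy_q[TOY_DIVISOR];
 *
 *	unsigned int toy_bucket(unsigned int flow_id, unsigned int perturb)
 *	{
 *		// any keyed hash works; this file uses a siphash-keyed
 *		// flow hash (see linux/siphash.h above)
 *		return ((flow_id ^ perturb) * 2654435761u) & (TOY_DIVISOR - 1);
 *	}
 *
 *	// dequeue: walk the ring of active queues; each queue may send
 *	// until its credit (refilled by one quantum per round) runs out.
 *
 * Flows that collide in toy_bucket() temporarily share one queue, hence
 * "stochastic"; re-keying the hash now and then re-deals the collisions.
 * Typical configuration from user space:
 *
 *	tc qdisc add dev eth0 root sfq perturb 10
 */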

#define SFQ_MAX_DEPTH		127 /* max number of packets per flow */
#define SFQ_DEFAULT_FLOWS	128
#define SFQ_MAX_FLOWS		(0x10000 - SFQ_MAX_DEPTH - 1) /* max number of flows */
#define SFQ_EMPTY_SLOT		0xffff
#define SFQ_DEFAULT_HASH_DIVISOR 1024

/* We use 16 bits to store allot, and want to handle packets up to 64K
 * Scale allot by 8 (1<<3) so that no overflow occurs.
 */
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	(((X) >> SFQ_ALLOT_SHIFT) + 1)
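
/*
 * Worked example of the scaling above: the biggest packet we must account
 * for is 64K, and SFQ_ALLOT_SIZE(0x10000) == (0x10000 >> 3) + 1 == 8193,
 * which fits comfortably in the 16 bits used to store a slot's allot even
 * after a few rounds of credit accumulate.
 */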

/* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */
typedef u16 sfq_index;

/*
 * We don't use pointers to save space.
 * Small indexes [0 ... SFQ_MAX_FLOWS - 1] are 'pointers' to the slots[]
 * array, while the following values [SFQ_MAX_FLOWS ... SFQ_MAX_FLOWS +
 * SFQ_MAX_DEPTH] are 'pointers' to the dep[] array.
 */
struct sfq_head {
	sfq_index	next;
	sfq_index	prev;
};

struct sfq_slot {
	struct sk_buff	*skblist_next;	/* must stay the first two fields: */
	struct sk_buff	*skblist_prev;	/* slot_queue helpers cast slot to sk_buff */
	sfq_index	qlen;		/* number of skbs in skblist */
	sfq_index	next;		/* next slot in sfq RR chain */
	struct sfq_head	dep;		/* anchor in dep[] chains */
	unsigned short	hash;		/* hash value (index in ht[]) */
	short		allot;		/* credit for this slot */
	unsigned int	backlog;
	struct red_vars	vars;
};

struct sfq_sched_data {
/* frequently used fields */
	int		limit;		/* limit of total number of packets in this qdisc */
	unsigned int	divisor;	/* number of slots in hash table */
	u8		headdrop;
	u8		maxdepth;	/* limit of packets per flow */

	siphash_key_t	perturbation;
	u8		cur_depth;	/* depth of longest slot */
	u8		flags;
	unsigned short	scaled_quantum;	/* SFQ_ALLOT_SIZE(quantum) */
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	sfq_index	*ht;		/* Hash table ('divisor' slots) */
	struct sfq_slot	*slots;		/* Flows table ('maxflows' entries) */

	struct red_parms *red_parms;
	struct tc_sfqred_stats stats;
	struct sfq_slot	*tail;		/* current slot in round */

	struct sfq_head	dep[SFQ_MAX_DEPTH + 1];
					/* Linked lists of slots, indexed by depth
					 * dep[0] : list of unused flows
					 * dep[1] : list of flows with 1 packet
					 * dep[X] : list of flows with X packets
					 */

	unsigned int	maxflows;	/* number of flows in flows array */
	int		perturb_period;
	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
	struct timer_list perturb_timer;
	struct Qdisc	*sch;
};

/*
 * sfq_head structures live either in an sfq_slot or in the dep[] array
 */
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
	if (val < SFQ_MAX_FLOWS)
		return &q->slots[val].dep;
	return &q->dep[val - SFQ_MAX_FLOWS];
}
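
/*
 * For example (with the limits above): val == 5 resolves to
 * &q->slots[5].dep, the anchor inside flow slot 5, while
 * val == SFQ_MAX_FLOWS + 2 resolves to &q->dep[2], the list of all
 * slots currently holding exactly 2 packets.
 */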

static unsigned int sfq_hash(const struct sfq_sched_data *q,
			     const struct sk_buff *skb)
{
	/* q->divisor is a power of two, so the mask selects one bucket */
	return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);
}

static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{}

/*
 * x : slot number [0 .. SFQ_MAX_FLOWS - 1]
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	struct sfq_slot *slot = &q->slots[x];
	int qlen = slot->qlen;

	/* link slot x at the head of the dep[] chain matching its depth */
	p = qlen + SFQ_MAX_FLOWS;
	n = q->dep[qlen].next;

	slot->dep.next = n;
	slot->dep.prev = p;

	q->dep[qlen].next = x;		/* sfq_dep_head(q, p)->next = x */
	sfq_dep_head(q, n)->prev = x;
}

#define sfq_unlink(q, x, n, p)			\
	do {					\
		n = q->slots[x].dep.next;	\
		p = q->slots[x].dep.prev;	\
		sfq_dep_head(q, p)->next = n;	\
		sfq_dep_head(q, n)->prev = p;	\
	} while (0)


static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = q->slots[x].qlen--;
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}

static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}
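
/*
 * Illustration: when a slot grows from 2 to 3 packets, sfq_inc() unlinks
 * it from the dep[2] chain and relinks it on dep[3], raising q->cur_depth
 * if 3 is now the maximum. This is what lets the drop path locate a
 * longest (fattest) flow in O(1) via q->dep[q->cur_depth].
 */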

/* helper functions: might be changed when/if skbs use a standard list_head */

/* remove one skb from tail of slot queue */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
	skb->prev->next = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

/* remove one skb from head of slot queue */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
	skb->next->prev = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

static inline void slot_queue_init(struct sfq_slot *slot)
{
	memset(&slot->vars, 0, sizeof(slot->vars));
	/* an empty list points back at the slot itself */
	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}

/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	skb->prev = slot->skblist_prev;
	skb->next = (struct sk_buff *)slot;
	slot->skblist_prev->next = skb;
	slot->skblist_prev = skb;
}
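
/*
 * Design note: the (struct sk_buff *)slot casts above work because
 * skblist_next/skblist_prev are deliberately the first two fields of
 * struct sfq_slot, mimicking sk_buff's own next/prev pointers; an empty
 * list is one whose head and tail point back at the slot itself (see
 * slot_queue_init()).
 */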

static unsigned int sfq_drop(struct Qdisc *sch, struct sk_buff **to_free)
{}

/* Is the ECN parameter configured? */
static int sfq_prob_mark(const struct sfq_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over the max threshold just be marked? */
static int sfq_hard_mark(const struct sfq_sched_data *q)
{
	return (q->flags & (TC_RED_ECN | TC_RED_HARDDROP)) == TC_RED_ECN;
}

static int sfq_headdrop(const struct sfq_sched_data *q)
{
	return q->headdrop;
}

static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{}

static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{}

static void
sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		rtnl_kfree_skbs(skb, skb);
}

/*
 * When q->perturbation is changed, we rehash all queued skbs
 * to avoid OOO (Out Of Order) effects.
 * We don't use sfq_dequeue()/sfq_enqueue() because we don't want to change
 * counters.
 */
static void sfq_rehash(struct Qdisc *sch)
{}
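
/*
 * In outline (a sketch; the full body also has to maintain backlog and
 * RED state), a rehash walks every active slot, pops each queued skb,
 * recomputes its bucket under the new key, and re-links it:
 *
 *	for each active slot s:
 *		while ((skb = slot_dequeue_head(s)) != NULL) {
 *			x = sfq_hash(q, skb);
 *			// append skb to the slot now reached via q->ht[x],
 *			// dropping only if no free slot remains
 *		}
 */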

static void sfq_perturbation(struct timer_list *t)
{}

static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{}

static void *sfq_alloc(size_t sz)
{
	return kvmalloc(sz, GFP_KERNEL);
}

static void sfq_free(void *addr)
{
	kvfree(addr);
}

static void sfq_destroy(struct Qdisc *sch)
{}

static int sfq_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{}

static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{}

static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long sfq_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void sfq_unbind(struct Qdisc *q, unsigned long cl)
{}

static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{}

static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{}

static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.find		=	sfq_find,
	.tcf_block	=	sfq_tcf_block,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_unbind,
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};

static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("sfq");

static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}
static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}
module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Stochastic Fairness qdisc");