// linux/net/sched/sch_sfb.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_sfb.c	  Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek <[email protected]>
 * Copyright (c) 2011 Eric Dumazet <[email protected]>
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
 *
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/siphash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>

/*
 * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
 * This implementation uses L = 8 and N = 16
 * This permits us to split one 32bit hash (provided per packet by rxhash or
 * external classifier) into 8 subhashes of 4 bits.
 */
/* Values follow directly from the comment above: one 32bit hash is split
 * into L = 8 subhashes of 4 bits, giving N = 16 bins per level.
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS	(1 << SFB_BUCKET_SHIFT) /* N bins per Level */
#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS	(32 / SFB_BUCKET_SHIFT) /* L */

/* SFB algo uses a virtual queue, named "bin" */
struct sfb_bucket {};

/* We use a double buffering right before hash change
 * (Section 4.4 of SFB reference : moving hash functions)
 */
/* NOTE(review): body stripped — upstream this holds the per-slot
 * perturbation key and the L x N array of sfb_bucket bins; restore it.
 */
struct sfb_bins {};

/* Per-qdisc private state.
 * NOTE(review): body stripped — fields referenced by the functions below
 * (two sfb_bins slots for double buffering, limits, increment/decrement
 * steps, penalty rate/burst, stats, child qdisc, classifier block) must
 * be restored before building.
 */
struct sfb_sched_data {};

/*
 * Each queued skb might be hashed on one or two bins
 * We store in skb_cb the two hash values.
 * (A zero value means double buffering was not used)
 */
/* NOTE(review): body stripped — needs the two per-slot hash values. */
struct sfb_skb_cb {};

/* Accessor for the SFB private area inside the skb control block.
 * NOTE(review): body stripped — a non-void function falling off the end
 * is undefined behavior when the caller uses the result; restore the
 * qdisc_skb_cb-based body before building.
 */
static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{}

/*
 * If using 'internal' SFB flow classifier, hash comes from skb rxhash
 * If using external classifier, hash comes from the classid.
 */
/* Return the 32bit hash used to index the bins of @slot for this skb.
 * NOTE(review): body stripped — must hash with the slot's perturbation
 * key (see the double-buffering comment above struct sfb_bins); missing
 * return value is UB for callers.
 */
static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
{}

/* Probabilities are coded as Q0.16 fixed-point values,
 * with 0xFFFF representing 65535/65536 (almost 1.0)
 * Addition and subtraction are saturating in [0, 65535]
 */
/* Saturating Q0.16 addition: per the comment above, probabilities live in
 * [0, 65535], with 0xFFFF meaning "almost 1.0", so the sum is clamped
 * rather than allowed to wrap.
 */
static u32 prob_plus(u32 p1, u32 p2)
{
	u32 res = p1 + p2;

	return res <= 0xFFFF ? res : 0xFFFF;
}

/* Saturating Q0.16 subtraction: clamps at 0 instead of wrapping around,
 * matching the "saturating in [0, 65535]" contract stated above.
 */
static u32 prob_minus(u32 p1, u32 p2)
{
	return p1 > p2 ? p1 - p2 : 0;
}

/* Bump the virtual queue length of the bin selected by each 4-bit
 * subhash of @sfbhash, at every level of slot @slot.
 * NOTE(review): body stripped — empty stub.
 */
static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
{}

/* Account a newly queued skb in its bin(s): the cb carries one hash per
 * slot, and a second non-zero hash means the packet is counted in both
 * slots during a hash swap (see sfb_skb_cb comment above).
 * NOTE(review): body stripped — empty stub.
 */
static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)
{}

/* Inverse of increment_one_qlen(): drop the virtual queue length of the
 * bins addressed by @sfbhash in slot @slot.
 * NOTE(review): body stripped — empty stub.
 */
static void decrement_one_qlen(u32 sfbhash, u32 slot,
			       struct sfb_sched_data *q)
{}

/* Un-account a dequeued/dropped skb from the bin(s) it was hashed into
 * (both slots if double buffering was active when it was enqueued).
 * NOTE(review): body stripped — empty stub.
 */
static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{}

/* Blue AQM reaction to an under-utilized bin: presumably lowers the
 * bin's marking probability by the configured decrement step — confirm
 * against mainline once the body is restored.
 * NOTE(review): body stripped — empty stub.
 */
static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{}

/* Blue AQM reaction to congestion in a bin: presumably raises the bin's
 * marking probability by the configured increment step — confirm against
 * mainline once the body is restored.
 * NOTE(review): body stripped — empty stub.
 */
static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{}

/* Reset every bin (all L x N buckets, in both double-buffer slots).
 * NOTE(review): body stripped — empty stub.
 */
static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{}

/*
 * compute max qlen, max p_mark, and avg p_mark
 */
/* Scan the bins of the current slot; returns max qlen, and reports max
 * p_mark via @prob_r and average p_mark via @avgpm_r (per the comment
 * above).
 * NOTE(review): body stripped — missing return value is UB for callers.
 */
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
{}


/* Re-key slot @slot's hash perturbation with fresh randomness — this is
 * what makes the "moving hash functions" of Section 4.4 move. Presumably
 * a siphash key given the <linux/siphash.h> include; confirm on restore.
 * NOTE(review): body stripped — empty stub.
 */
static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{}

/* Switch the active double-buffer slot after a hash change (Section 4.4
 * of the SFB reference — see comment above struct sfb_bins).
 * NOTE(review): body stripped — empty stub.
 */
static void sfb_swap_slot(struct sfb_sched_data *q)
{}

/* Non elastic flows are allowed to use part of the bandwidth, expressed
 * in "penalty_rate" packets per second, with "penalty_burst" burst
 */
/* Token-bucket style limiter for non-elastic flows, driven by
 * penalty_rate/penalty_burst (see comment above). Should return true
 * when the packet exceeds its allowance.
 * NOTE(review): body stripped — missing return value is UB for callers.
 */
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
{}

/* Run the external classifier @fl on @skb; on a match fill @salt with
 * the classid-derived hash salt (see sfb_hash comment above) and return
 * true, otherwise set @qerr appropriately.
 * NOTE(review): body stripped — missing return value is UB for callers.
 */
static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
			 int *qerr, u32 *salt)
{}

/* Qdisc_ops.enqueue: the core SFB path — hash the packet into its bins,
 * apply the Blue marking/dropping decision, and hand the survivor to the
 * child qdisc. Must return a NET_XMIT_* code.
 * NOTE(review): body stripped — missing return value is UB for callers.
 */
static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{}

/* Qdisc_ops.dequeue: pull from the child qdisc and un-account the skb
 * from its bins (decrement_qlen).
 * NOTE(review): body stripped — missing return value is UB for callers.
 */
static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
{}

/* Qdisc_ops.peek: look at the child's head skb without dequeueing it.
 * NOTE(review): body stripped — missing return value is UB for callers.
 */
static struct sk_buff *sfb_peek(struct Qdisc *sch)
{}

/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */

/* Qdisc_ops.reset: flush the child qdisc and zero all SFB bin state.
 * NOTE(review): body stripped — empty stub.
 */
static void sfb_reset(struct Qdisc *sch)
{}

/* Qdisc_ops.destroy: release the classifier block and the child qdisc.
 * NOTE(review): body stripped — empty stub; as written it would leak
 * both on teardown.
 */
static void sfb_destroy(struct Qdisc *sch)
{}

/* NOTE(review): initializer stripped — "=;" is not valid C. The netlink
 * attribute validation policy for TCA_SFB_* must be restored.
 */
static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] =;

/* NOTE(review): initializer stripped — default tc_sfb_qopt parameters
 * used when the user supplies none; restore before building.
 */
static const struct tc_sfb_qopt sfb_default_ops =;

/* Qdisc_ops.change: parse netlink options in @opt (validated against
 * sfb_policy, defaulting to sfb_default_ops) and reconfigure the qdisc.
 * Returns 0 or a negative errno.
 * NOTE(review): body stripped — missing return value is UB for callers.
 */
static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{}

/* Qdisc_ops.init: set up classifier block and initial state, then apply
 * @opt via the same path as sfb_change. Returns 0 or a negative errno.
 * NOTE(review): body stripped — missing return value is UB for callers.
 */
static int sfb_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{}

/* Qdisc_ops.dump: serialize the current configuration into @skb as
 * netlink attributes.
 * NOTE(review): body stripped — missing return value is UB for callers.
 */
static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
{}

/* Qdisc_ops.dump_stats: export SFB statistics (queue lengths, marking
 * probabilities — see sfb_compute_qlen above) via @d.
 * NOTE(review): body stripped — missing return value is UB for callers.
 */
static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{}

/* Qdisc_class_ops.dump: describe the single (child) class into @tcm.
 * NOTE(review): body stripped — missing return value is UB for callers.
 */
static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{}

/* Qdisc_class_ops.graft: swap the child qdisc, returning the previous
 * one through @old.
 * NOTE(review): body stripped — missing return value is UB for callers.
 */
static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{}

/* Qdisc_class_ops.leaf: return the child qdisc.
 * NOTE(review): body stripped — missing return value is UB for callers.
 */
static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
{}

/* Qdisc_class_ops.find: map @classid to a class handle (SFB has a single
 * implicit class).
 * NOTE(review): body stripped — missing return value is UB for callers.
 */
static unsigned long sfb_find(struct Qdisc *sch, u32 classid)
{}

/* Qdisc_class_ops.unbind_tcf: release a classifier binding (no-op for
 * SFB's single class in mainline — confirm on restore).
 * NOTE(review): body stripped — empty stub.
 */
static void sfb_unbind(struct Qdisc *sch, unsigned long arg)
{}

/* Qdisc_class_ops.change: SFB classes cannot be created or modified by
 * the user; should report an error.
 * NOTE(review): body stripped — missing return value is UB for callers.
 */
static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{}

/* Qdisc_class_ops.delete: the built-in class cannot be deleted; should
 * report an error.
 * NOTE(review): body stripped — missing return value is UB for callers.
 */
static int sfb_delete(struct Qdisc *sch, unsigned long cl,
		      struct netlink_ext_ack *extack)
{}

/* Qdisc_class_ops.walk: enumerate the single class through @walker.
 * NOTE(review): body stripped — empty stub.
 */
static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{}

/* Qdisc_class_ops.tcf_block: expose the classifier block so filters can
 * be attached (used by the external-classifier path in sfb_classify).
 * NOTE(review): body stripped — missing return value is UB for callers.
 */
static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{}

/* Qdisc_class_ops.bind_tcf: bind a filter to the class identified by
 * @classid.
 * NOTE(review): body stripped — missing return value is UB for callers.
 */
static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{}


/* NOTE(review): initializers stripped — "=;" is not valid C. These two
 * ops tables must wire up the callbacks defined above, and
 * MODULE_ALIAS_NET_SCH() needs its qdisc-name argument restored.
 */
static const struct Qdisc_class_ops sfb_class_ops =;

static struct Qdisc_ops sfb_qdisc_ops __read_mostly =;
MODULE_ALIAS_NET_SCH();

/* Module entry point: should register sfb_qdisc_ops.
 * NOTE(review): body stripped — missing return value is UB.
 */
static int __init sfb_module_init(void)
{}

/* Module exit point: should unregister sfb_qdisc_ops.
 * NOTE(review): body stripped — empty stub.
 */
static void __exit sfb_module_exit(void)
{}

/* NOTE(review): macro arguments stripped — module_init/module_exit need
 * the functions defined above, and the MODULE_* macros need their string
 * arguments (description, authors, and a GPL-compatible license) before
 * this module can build and load.
 */
module_init()
module_exit()

MODULE_DESCRIPTION();
MODULE_AUTHOR();
MODULE_AUTHOR();
MODULE_LICENSE();