linux/net/sched/sch_fq_codel.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Fair Queue CoDel discipline
 *
 *  Copyright (C) 2012,2015 Eric Dumazet <[email protected]>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

/*	Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified (by the internal classifier or an external one)
 * into flows.
 * This is a stochastic model (as we use a hash, several flows
 *			       might be hashed to the same slot).
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * drops happen only at the head.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */

struct fq_codel_flow {
	struct sk_buff	  *head;	/* oldest packet of this flow */
	struct sk_buff	  *tail;	/* newest packet of this flow */
	struct list_head  flowchain;	/* node in new_flows or old_flows */
	int		  deficit;	/* DRR byte credit */
	struct codel_vars cvars;	/* per-flow CoDel state */
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct tcf_block *block;
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		drop_batch_size;
	u32		memory_limit;
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		memory_usage;
	u32		drop_overmemory;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};
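
/* A minimal sketch (not in the original file; fq_codel_pick_flow_sketch is
 * a hypothetical name) of how the two Round Robin lists realize the
 * principles above: new flows are served before old ones, and a flow whose
 * deficit is exhausted gets a fresh quantum and rotates to the tail of
 * old_flows.  The real selection logic lives in fq_codel_dequeue() below.
 */
static inline struct fq_codel_flow *
fq_codel_pick_flow_sketch(struct fq_codel_sched_data *q, u32 quantum)
{
	struct list_head *head;
	struct fq_codel_flow *flow;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;	/* qdisc is empty */
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);
	if (flow->deficit <= 0) {
		/* quantum used up: refill and demote to the old list */
		flow->deficit += quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}
	return flow;
}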

static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}

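/* Map an skb to a flow index in [1, flows_cnt]; 0 means the packet was
 * dropped or stolen by a filter action.  skb->priority can select a
 * bucket directly when its major number matches the qdisc handle;
 * otherwise an attached filter, or the stochastic hash, decides.
 */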
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{}

/* helper functions: might be changed when/if skbs use a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb_mark_not_on_list(skb);
	return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

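/* Overlimit handling: find the fattest flow (the one with the largest
 * backlog) and drop up to max_packets from its head in one batch, so the
 * per-enqueue cost stays bounded and the heaviest flow pays the price.
 */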
static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
				  struct sk_buff **to_free)
{}

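/* Enqueue: classify the skb to a flow and tail-add it to that flow's
 * FIFO.  An idle flow is (re)attached to the tail of new_flows with a
 * full quantum of deficit.  If the packet or memory limit is now
 * exceeded, a batch is dropped from the fattest flow.
 */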
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			    struct sk_buff **to_free)
{}

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from a queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		q->memory_usage -= get_codel_cb(skb)->mem_usage;
		sch->q.qlen--;
		sch->qstats.backlog -= qdisc_pkt_len(skb);
	}
	return skb;
}

static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}

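/* Dequeue: serve new_flows first, then old_flows, spending deficit per
 * packet (DRR).  codel_dequeue() runs the CoDel state machine for the
 * chosen flow, using dequeue_func()/drop_func() as callbacks.  A flow
 * found empty is demoted from new_flows to old_flows, or unlinked.
 */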
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{}

static void fq_codel_flow_purge(struct fq_codel_flow *flow)
{
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
}

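/* Reset: purge every flow, reinitialize the two Round Robin lists and
 * the per-flow CoDel state, and zero the backlog accounting.
 */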
static void fq_codel_reset(struct Qdisc *sch)
{}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
};

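/* Change: parse TCA_FQ_CODEL_* attributes against fq_codel_policy and
 * apply them under the qdisc tree lock, dropping packets as needed if
 * the new limit is smaller than the current queue length.
 */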
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{}

static void fq_codel_destroy(struct Qdisc *sch)
{}

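/* Init: apply the defaults (1024 flows, a limit of 10240 packets, ECN
 * enabled), allocate the flows and backlogs tables, and initialize
 * per-flow CoDel state before the qdisc starts taking packets.
 */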
static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{}

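/* Class interface: fq_codel is classless from the user's point of view,
 * but exposes each flow bucket as a pseudo class so that filters can be
 * attached and per-flow statistics dumped.
 */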
static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	return 0;
}

static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
					    struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.find		=	fq_codel_find,
	.tcf_block	=	fq_codel_tcf_block,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_unbind,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.id		=	"fq_codel",
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("fq_codel");

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fair Queue CoDel discipline");