// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 * Authors:	Alexey Kuznetsov, <[email protected]>
 *              Jamal Hadi Salim, <[email protected]> 990601
 *              - Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/if_macvlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <net/hotdata.h>
#include <trace/events/qdisc.h>
#include <trace/events/net.h>
#include <net/xfrm.h>

/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);

static void qdisc_maybe_clear_missed(struct Qdisc *q,
				     const struct netdev_queue *txq)
{}

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * the qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via the qdisc root lock
 * - ingress filtering is also serialized via the qdisc root lock
 * - updates to the tree and tree walking are done only under the rtnl mutex.
 */
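
/* A minimal sketch (an illustration, not part of this file and not a
 * kernel API) of the first rule above: a hypothetical caller serializes
 * enqueue against dequeue by taking the qdisc root lock around the
 * enqueue operation.
 */
static int __maybe_unused example_enqueue_locked(struct sk_buff *skb,
						 struct Qdisc *q,
						 struct sk_buff **to_free)
{
	spinlock_t *root_lock = qdisc_lock(q);
	int ret;

	spin_lock(root_lock);		/* serializes enqueue/dequeue */
	ret = q->enqueue(skb, q, to_free);
	spin_unlock(root_lock);
	return ret;
}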

#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)

static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
{}

static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
{}

static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
					     struct sk_buff *skb)
{}

static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{}

static void try_bulk_dequeue_skb(struct Qdisc *q,
				 struct sk_buff *skb,
				 const struct netdev_queue *txq,
				 int *packets)
{}

/* This variant of try_bulk_dequeue_skb() makes sure
 * all skbs in the chain are for the same txq
 */
static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
				      struct sk_buff *skb,
				      int *packets)
{}

/* Note that dequeue_skb() can return an SKB list (chained via skb->next).
 * A requeued skb (via q->gso_skb) can also be an SKB list.
 */
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
				   int *packets)
{}
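
/* Sketch (an illustration, not a helper this file defines): a chain
 * returned by dequeue_skb() is walked through skb->next, e.g. to count
 * how many packets a single dequeue produced.
 */
static int __maybe_unused example_count_chain(const struct sk_buff *skb)
{
	int n = 0;

	while (skb) {
		n++;
		skb = skb->next;
	}
	return n;
}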

/*
 * Transmit possibly several skbs, and handle the return status as
 * required. Holding the qdisc running bit guarantees that only one CPU
 * can execute this function.
 *
 * Returns to the caller:
 *				false  - hardware queue frozen, back off
 *				true   - feel free to send more packets
 */
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		     struct net_device *dev, struct netdev_queue *txq,
		     spinlock_t *root_lock, bool validate)
{}
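
/* A hedged sketch of the typical calling pattern (mirroring what
 * qdisc_restart() below is expected to do, not a definitive copy):
 * dequeue one skb chain, resolve its tx queue, and transmit until the
 * driver signals backpressure.
 */
static bool __maybe_unused example_xmit_once(struct Qdisc *q,
					     struct net_device *dev,
					     spinlock_t *root_lock)
{
	struct netdev_queue *txq;
	struct sk_buff *skb;
	bool validate;
	int packets;

	skb = dequeue_skb(q, &validate, &packets);
	if (!skb)
		return false;	/* nothing to send */

	txq = skb_get_tx_queue(dev, skb);
	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}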

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * The qdisc running bit guarantees that only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to the device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive:
 * if one is grabbed, the other must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *				false - queue is empty or throttled.
 *				true  - queue is not empty.
 */
static inline bool qdisc_restart(struct Qdisc *q, int *packets)
{}

void __qdisc_run(struct Qdisc *q)
{}
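
/* Sketch of the driving-loop contract (an assumption based on the
 * qdisc_restart() comment above; the quota of 64 is an illustrative
 * stand-in for the real tx weight in net_hotdata): restart the qdisc
 * until it empties or the quota runs out, then reschedule instead of
 * hogging the CPU.
 */
static void __maybe_unused example_qdisc_run(struct Qdisc *q)
{
	int quota = 64;		/* stand-in for the configured tx weight */
	int packets;

	while (qdisc_restart(q, &packets)) {
		quota -= packets;
		if (quota <= 0) {
			__netif_schedule(q);	/* finish later in softirq */
			break;
		}
	}
}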

unsigned long dev_trans_start(struct net_device *dev)
{}
EXPORT_SYMBOL(dev_trans_start);

static void netif_freeze_queues(struct net_device *dev)
{}

void netif_tx_lock(struct net_device *dev)
{}
EXPORT_SYMBOL(netif_tx_lock);

static void netif_unfreeze_queues(struct net_device *dev)
{}

void netif_tx_unlock(struct net_device *dev)
{}
EXPORT_SYMBOL(netif_tx_unlock);

static void dev_watchdog(struct timer_list *t)
{}

void __netdev_watchdog_up(struct net_device *dev)
{}
EXPORT_SYMBOL_GPL(__netdev_watchdog_up);

static void dev_watchdog_up(struct net_device *dev)
{}

static void dev_watchdog_down(struct net_device *dev)
{}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 * Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 * Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{}
EXPORT_SYMBOL(netif_carrier_off);

/**
 *	netif_carrier_event - report carrier state event
 *	@dev: network device
 *
 * Device has detected a carrier event but the carrier state wasn't changed.
 * Use in drivers when querying carrier state asynchronously, to avoid missing
 * events (link flaps) if the link recovers before it's queried.
 */
void netif_carrier_event(struct net_device *dev)
{}
EXPORT_SYMBOL_GPL(netif_carrier_event);
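
/* Hypothetical driver link handler (example_link_change() and its
 * link_up flag are assumptions) showing how the carrier helpers above
 * are typically used together.
 */
static void __maybe_unused example_link_change(struct net_device *dev,
					       bool link_up)
{
	if (link_up)
		netif_carrier_on(dev);	/* carrier gained: queues may run */
	else
		netif_carrier_off(dev);	/* carrier lost: stop transmissions */
}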

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			struct sk_buff **to_free)
{}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
	RCU_POINTER_INITIALIZER(qdisc_sleeping, &noop_qdisc),
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	=	&noop_netdev_queue,
	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
	.gso_skb = {
		.next = (struct sk_buff *)&noop_qdisc.gso_skb,
		.prev = (struct sk_buff *)&noop_qdisc.gso_skb,
		.qlen = 0,
		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
	},
	.skb_bad_txq = {
		.next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
		.prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
		.qlen = 0,
		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
	},
};
EXPORT_SYMBOL(noop_qdisc);

static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{}

struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.init		=	noqueue_init,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

const u8 sch_default_prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};
EXPORT_SYMBOL(sch_default_prio2band);

/* 3-band FIFO queue: old style, but should be a bit faster than
   the generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 *	- rings for priority bands
 */
struct pfifo_fast_priv {
	struct skb_array q[PFIFO_FAST_BANDS];
};

static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
					  int band)
{}
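
/* Sketch (an assumption, mirroring what pfifo_fast_enqueue() is
 * expected to do) of how a packet is mapped to a ring: the low bits of
 * skb->priority index sch_default_prio2band, and the resulting band
 * selects one of the per-band skb_array rings.
 */
static __maybe_unused struct skb_array *
example_ring_for_skb(struct pfifo_fast_priv *priv, const struct sk_buff *skb)
{
	int band = sch_default_prio2band[skb->priority & TC_PRIO_MAX];

	return band2list(priv, band);
}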

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			      struct sk_buff **to_free)
{}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{}

static void pfifo_fast_destroy(struct Qdisc *sch)
{}

static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
					  unsigned int new_len)
{}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		=	"pfifo_fast",
	.priv_size	=	sizeof(struct pfifo_fast_priv),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.peek		=	pfifo_fast_peek,
	.init		=	pfifo_fast_init,
	.destroy	=	pfifo_fast_destroy,
	.reset		=	pfifo_fast_reset,
	.change_tx_queue_len =	pfifo_fast_change_tx_queue_len,
	.dump		=	pfifo_fast_dump,
	.owner		=	THIS_MODULE,
	.static_flags	=	TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
};
EXPORT_SYMBOL(pfifo_fast_ops);

static struct lock_class_key qdisc_tx_busylock;

struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack)
{}

struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops,
				unsigned int parentid,
				struct netlink_ext_ack *extack)
{}
EXPORT_SYMBOL(qdisc_create_dflt);
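
/* Hypothetical use of qdisc_create_dflt() (example_attach_default() is
 * an illustration, not how this file attaches defaults verbatim):
 * build a default-ops qdisc for a tx queue and graft it in place.
 */
static void __maybe_unused example_attach_default(struct netdev_queue *txq)
{
	struct Qdisc *qdisc;

	qdisc = qdisc_create_dflt(txq, default_qdisc_ops, TC_H_ROOT, NULL);
	if (qdisc)
		dev_graft_qdisc(txq, qdisc);	/* returns the old qdisc,
						 * which a real caller must
						 * dispose of */
}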

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{}
EXPORT_SYMBOL(qdisc_reset);

void qdisc_free(struct Qdisc *qdisc)
{}

static void qdisc_free_cb(struct rcu_head *head)
{}

static void __qdisc_destroy(struct Qdisc *qdisc)
{}

void qdisc_destroy(struct Qdisc *qdisc)
{}

void qdisc_put(struct Qdisc *qdisc)
{}
EXPORT_SYMBOL(qdisc_put);

/* Version of qdisc_put() that is called with the rtnl mutex unlocked.
 * Intended as an optimization: this function only takes the rtnl lock
 * if the qdisc reference counter has reached zero.
 */

void qdisc_put_unlocked(struct Qdisc *qdisc)
{}
EXPORT_SYMBOL(qdisc_put_unlocked);

/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{}
EXPORT_SYMBOL(dev_graft_qdisc);

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{}

static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{}

static void attach_default_qdiscs(struct net_device *dev)
{}

static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{}

void dev_activate(struct net_device *dev)
{}
EXPORT_SYMBOL(dev_activate);

static void qdisc_deactivate(struct Qdisc *qdisc)
{}

static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{}

static void dev_reset_queue(struct net_device *dev,
			    struct netdev_queue *dev_queue,
			    void *_unused)
{}

static bool some_qdisc_is_busy(struct net_device *dev)
{}

/**
 * 	dev_deactivate_many - deactivate transmissions on several devices
 * 	@head: list of devices to deactivate
 *
 *	This function returns only when all outstanding transmissions
 *	have completed, unless all devices are in the dismantle phase.
 */
void dev_deactivate_many(struct list_head *head)
{}

void dev_deactivate(struct net_device *dev)
{}
EXPORT_SYMBOL(dev_deactivate);
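
/* Sketch of the single-device wrapper pattern (an assumption about how
 * dev_deactivate() above relates to dev_deactivate_many()): put the
 * device on a local list and reuse the many-devices helper.
 */
static void __maybe_unused example_deactivate_one(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	dev_deactivate_many(&single);
	list_del(&single);
}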

static int qdisc_change_tx_queue_len(struct net_device *dev,
				     struct netdev_queue *dev_queue)
{}

void dev_qdisc_change_real_num_tx(struct net_device *dev,
				  unsigned int new_real_tx)
{}

void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
{}
EXPORT_SYMBOL(mq_change_real_num_tx);

int dev_qdisc_change_tx_queue_len(struct net_device *dev)
{}

static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{}

void dev_init_scheduler(struct net_device *dev)
{}

void dev_shutdown(struct net_device *dev)
{}

/**
 * psched_ratecfg_precompute__() - Pre-compute values for reciprocal division
 * @rate:   Rate to compute reciprocal division values of
 * @mult:   Multiplier for reciprocal division
 * @shift:  Shift for reciprocal division
 *
 * The multiplier and shift for reciprocal division by rate are stored
 * in mult and shift.
 *
 * The deal here is to replace a divide by a reciprocal one
 * in the fast path (a reciprocal divide is a multiply and a shift).
 *
 * The normal formula would be:
 *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
 *
 * We compute mult/shift to use instead:
 *  time_in_ns = (len * mult) >> shift;
 *
 * We try to get the highest possible mult value for accuracy,
 * but have to make sure no overflows will ever happen.
 *
 * reciprocal_value() is not used here because it doesn't handle
 * 64-bit values.
 */
static void psched_ratecfg_precompute__(u64 rate, u32 *mult, u8 *shift)
{}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64)
{}
EXPORT_SYMBOL(psched_ratecfg_precompute);
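
/* Worked example of the mult/shift transform documented above (the
 * numbers are illustrative assumptions): at 1 Gbit/s the rate is
 * 125000000 bytes/s, so each byte costs NSEC_PER_SEC / rate = 8 ns,
 * and a 1500 byte frame should cost (1500 * mult) >> shift ~= 12000 ns.
 */
static u64 __maybe_unused example_len_to_ns(u64 len, u32 mult, u8 shift)
{
	return (len * mult) >> shift;	/* reciprocal divide: mul + shift */
}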

void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64)
{}
EXPORT_SYMBOL(psched_ppscfg_precompute);

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head)
{}
EXPORT_SYMBOL(mini_qdisc_pair_swap);

void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
				struct tcf_block *block)
{}
EXPORT_SYMBOL(mini_qdisc_pair_block_init);

void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq)
{}
EXPORT_SYMBOL(mini_qdisc_pair_init);
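
/* Hypothetical use of the mini_Qdisc pair (struct example_sch_priv and
 * example_filter_update() are assumptions sketching the clsact-style
 * pattern): initialize the pair once at qdisc setup, then swap in a new
 * filter chain head so readers always see a consistent mini_Qdisc.
 */
struct example_sch_priv {
	struct mini_Qdisc_pair miniqp;
};

static void __maybe_unused example_filter_update(struct example_sch_priv *q,
						 struct tcf_proto *tp_head)
{
	/* Readers observe either the old or the new chain, never a mix. */
	mini_qdisc_pair_swap(&q->miniqp, tp_head);
}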