/* linux/net/sched/sch_taprio.c */

// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	 Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <[email protected]>
 *
 */

#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/time.h>
#include <net/gso.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/tcp.h>

/* NOTE(review): macro value elided in this extraction — upstream defines a
 * sentinel for "statistic not reported by the driver"; confirm against the
 * full source.
 */
#define TAPRIO_STAT_NOT_SET

#include "sch_mqprio_lib.h"

/* Global list of all taprio qdisc instances — presumably walked by the
 * netdev notifier (taprio_dev_notifier below) to react to link changes.
 */
static LIST_HEAD(taprio_list);
/* Static branch keys tracking whether any live instance uses the "broken"
 * (TXQ-priority) vs "working" (TC-priority) mqprio dequeue mode; see
 * taprio_dequeue_txq_priority()/taprio_dequeue_tc_priority() below.
 */
static struct static_key_false taprio_have_broken_mqprio;
static struct static_key_false taprio_have_working_mqprio;

/* NOTE(review): all macro values below were elided in this extraction
 * ("#define NAME" with no replacement text); upstream these expand to a
 * gate bitmask and flag-test expressions respectively. This skeleton will
 * not behave correctly if compiled as-is — consult the full source.
 */
#define TAPRIO_ALL_GATES_OPEN

/* Flag tests for TCA_TAPRIO_ATTR_FLAGS (values elided). */
#define TXTIME_ASSIST_IS_ENABLED(flags)
#define FULL_OFFLOAD_IS_ENABLED(flags)
#define TAPRIO_SUPPORTED_FLAGS
#define TAPRIO_FLAGS_INVALID

/* NOTE(review): struct members elided in this extraction. From usage below:
 * sched_entry is one gate-control-list entry, sched_gate_list is a whole
 * (oper or admin) schedule, taprio_sched is the per-qdisc private state, and
 * __tc_taprio_qopt_offload wraps the refcounted offload descriptor. Confirm
 * the actual layouts against upstream sch_taprio.c.
 */
struct sched_entry {};

struct sched_gate_list {};

struct taprio_sched {};

struct __tc_taprio_qopt_offload {};

/* NOTE(review): every function body in this extraction is elided ("{}").
 * The comments below describe intent inferred from names, signatures and
 * the surviving upstream comments — confirm against the full source.
 */

/* Presumably recomputes, per traffic class, the (maximum) gate-open
 * durations across all entries of @sched.
 */
static void taprio_calculate_gate_durations(struct taprio_sched *q,
					    struct sched_gate_list *sched)
{}

/* Presumably: can a frame finishing at @skb_end_time be transmitted within
 * @entry's open-gate window for traffic class @tc?
 */
static bool taprio_entry_allows_tx(ktime_t skb_end_time,
				   struct sched_entry *entry, int tc)
{}

/* Presumably returns @sched's configured base time. */
static ktime_t sched_base_time(const struct sched_gate_list *sched)
{}

/* Presumably converts a CLOCK_MONOTONIC timestamp to the qdisc's
 * configured clock domain.
 */
static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono)
{}

/* Presumably reads "now" in the qdisc's configured clock. */
static ktime_t taprio_get_time(const struct taprio_sched *q)
{}

/* RCU callback — presumably frees a sched_gate_list after a grace period
 * (queued via call_rcu; see switch_schedules).
 */
static void taprio_free_sched_cb(struct rcu_head *head)
{}

/* Presumably promotes the admin schedule to oper and retires the old oper
 * schedule (freed via taprio_free_sched_cb).
 */
static void switch_schedules(struct taprio_sched *q,
			     struct sched_gate_list **admin,
			     struct sched_gate_list **oper)
{}

/* Get how much time has been already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{}

/* Presumably computes when the interval starting at @intv_start ends,
 * bounded by the cycle end and (if set) @admin's base time.
 */
static ktime_t get_interval_end_time(struct sched_gate_list *sched,
				     struct sched_gate_list *admin,
				     struct sched_entry *entry,
				     ktime_t intv_start)
{}

/* Presumably converts a frame length (bytes) into wire time at the current
 * link speed; see taprio_set_picos_per_byte() below.
 */
static int length_to_duration(struct taprio_sched *q, int len)
{}

/* Presumably the inverse of length_to_duration(). */
static int duration_to_length(struct taprio_sched *q, u64 duration)
{}

/* Sets sched->max_sdu[] and sched->max_frm_len[] to the minimum between the
 * q->max_sdu[] requested by the user and the max_sdu dynamically determined by
 * the maximum open gate durations at the given link speed.
 * (NOTE(review): body elided in this extraction.)
 */
static void taprio_update_queue_max_sdu(struct taprio_sched *q,
					struct sched_gate_list *sched,
					struct qdisc_size_table *stab)
{}

/* Returns the entry corresponding to next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
						  struct Qdisc *sch,
						  struct sched_gate_list *sched,
						  struct sched_gate_list *admin,
						  ktime_t time,
						  ktime_t *interval_start,
						  ktime_t *interval_end,
						  bool validate_interval)
{}

/* Presumably: does @skb's timestamp fall inside an open-gate interval for
 * its traffic class? (wrapper around find_entry_to_transmit with
 * validate_interval=true — body elided, confirm upstream).
 */
static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{}

/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{}

/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past,
 *    a. The gate for the traffic class is currently open and packet can be
 *       transmitted before it closes, schedule the packet right away.
 *    b. If the gate corresponding to the traffic class is going to open later
 *       in the cycle, set the txtime of packet to the interval start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully. So, schedule the packet in the next open window.
 */
static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{}

/* Devices with full offload are expected to honor this in hardware */
static bool taprio_skb_exceeds_queue_max_sdu(struct Qdisc *sch,
					     struct sk_buff *skb)
{}

/* Presumably enqueues one skb to the @child qdisc, dropping it if it
 * exceeds the queue's max SDU (body elided — confirm upstream).
 */
static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
			      struct Qdisc *child, struct sk_buff **to_free)
{}

/* Presumably GSO-segments an oversized skb and enqueues each segment via
 * taprio_enqueue_one() (body elided — confirm upstream).
 */
static int taprio_enqueue_segmented(struct sk_buff *skb, struct Qdisc *sch,
				    struct Qdisc *child,
				    struct sk_buff **to_free)
{}

/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{}

/* .peek qdisc op — NOTE(review): body elided in this extraction. */
static struct sk_buff *taprio_peek(struct Qdisc *sch)
{}

/* Presumably resets @entry's per-TC byte budgets from the gate durations
 * and link speed (body elided — confirm upstream).
 */
static void taprio_set_budgets(struct taprio_sched *q,
			       struct sched_gate_list *sched,
			       struct sched_entry *entry)
{}

/* When an skb is sent, it consumes from the budget of all traffic classes */
static int taprio_update_budgets(struct sched_entry *entry, size_t len,
				 int tc_consumed, int num_tc)
{}

/* Presumably dequeues from TXQ @txq if its traffic class's gate is open in
 * @gate_mask and budget/guard-band checks pass (body elided).
 */
static struct sk_buff *taprio_dequeue_from_txq(struct Qdisc *sch, int txq,
					       struct sched_entry *entry,
					       u32 gate_mask)
{}

/* Presumably advances *txq round-robin within the TXQ range of traffic
 * class @tc (body elided).
 */
static void taprio_next_tc_txq(struct net_device *dev, int tc, int *txq)
{}

/* Prioritize higher traffic classes, and select among TXQs belonging to the
 * same TC using round robin
 */
static struct sk_buff *taprio_dequeue_tc_priority(struct Qdisc *sch,
						  struct sched_entry *entry,
						  u32 gate_mask)
{}

/* Broken way of prioritizing smaller TXQ indices and ignoring the traffic
 * class other than to determine whether the gate is open or not
 */
static struct sk_buff *taprio_dequeue_txq_priority(struct Qdisc *sch,
						   struct sched_entry *entry,
						   u32 gate_mask)
{}

/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{}

/* Presumably: has the last entry of @oper been reached, so the cycle must
 * wrap to the first entry? (body elided)
 */
static bool should_restart_cycle(const struct sched_gate_list *oper,
				 const struct sched_entry *entry)
{}

/* Presumably: has @admin's base time been reached at @end_time, so the
 * admin schedule should replace @oper? (body elided)
 */
static bool should_change_schedules(const struct sched_gate_list *admin,
				    const struct sched_gate_list *oper,
				    ktime_t end_time)
{}

/* hrtimer callback that steps the schedule to its next gate-control entry
 * (and switches oper/admin schedules when due) — body elided, confirm
 * upstream.
 */
static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{}

/* Netlink attribute policies and the signed cycle-time range.
 * NOTE(review): the initializers were elided in this extraction — "=;" is
 * not valid C; consult upstream sch_taprio.c for the per-attribute
 * validation rules.
 */
static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] =;

static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] =;

static const struct netlink_range_validation_signed taprio_cycle_time_range =;

static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] =;

/* NOTE(review): bodies in this section are elided ("{}"); comments are
 * inferred from names/signatures — confirm against the full source.
 */

/* Presumably copies validated netlink attributes @tb into @entry. */
static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
			    struct sched_entry *entry,
			    struct netlink_ext_ack *extack)
{}

/* Presumably parses one TCA_TAPRIO_SCHED_ENTRY nest @n into @entry. */
static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
			     struct sched_entry *entry, int index,
			     struct netlink_ext_ack *extack)
{}

/* Presumably parses the full entry list @list into @sched. */
static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
			    struct sched_gate_list *sched,
			    struct netlink_ext_ack *extack)
{}

/* Presumably parses a complete schedule (base time, cycle time, entries)
 * into @new.
 */
static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
				 struct sched_gate_list *new,
				 struct netlink_ext_ack *extack)
{}

/* Presumably validates the user's mqprio-style TC/queue mapping @qopt
 * against @dev and @taprio_flags.
 */
static int taprio_parse_mqprio_opt(struct net_device *dev,
				   struct tc_mqprio_qopt *qopt,
				   struct netlink_ext_ack *extack,
				   u32 taprio_flags)
{}

/* Presumably computes when @sched should (first) become active, writing
 * the result to *start.
 */
static int taprio_get_start_time(struct Qdisc *sch,
				 struct sched_gate_list *sched,
				 ktime_t *start)
{}

/* Presumably initializes the first entry's end time from @base. */
static void setup_first_end_time(struct taprio_sched *q,
				 struct sched_gate_list *sched, ktime_t base)
{}

/* Presumably arms the advance_sched hrtimer so @new starts at @start. */
static void taprio_start_sched(struct Qdisc *sch,
			       ktime_t start, struct sched_gate_list *new)
{}

/* Presumably caches the link speed as picoseconds-per-byte for
 * length_to_duration()/duration_to_length().
 */
static void taprio_set_picos_per_byte(struct net_device *dev,
				      struct taprio_sched *q)
{}

/* Netdev notifier — presumably refreshes cached link parameters for
 * affected instances on taprio_list.
 */
static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
			       void *ptr)
{}

/* Presumably precomputes per-entry next_txtime values from @base (txtime
 * assist mode).
 */
static void setup_txtime(struct taprio_sched *q,
			 struct sched_gate_list *sched, ktime_t base)
{}

/* Presumably allocates a refcounted tc_taprio_qopt_offload sized for
 * @num_entries gate entries (body elided — confirm upstream).
 */
static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
{}

/* Takes a reference on @offload for a driver.
 * NOTE(review): body and the EXPORT_SYMBOL_GPL() argument were elided in
 * this extraction.
 */
struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload)
{}
EXPORT_SYMBOL_GPL();

/* Drops a reference on @offload (argument of EXPORT_SYMBOL_GPL() elided). */
void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
{}
EXPORT_SYMBOL_GPL();

/* The function will only serve to keep the pointers to the "oper" and "admin"
 * schedules valid in relation to their base times, so when calling dump() the
 * users looks at the right schedules.
 * When using full offload, the admin configuration is promoted to oper at the
 * base_time in the PHC time domain.  But because the system time is not
 * necessarily in sync with that, we can't just trigger a hrtimer to call
 * switch_schedules at the right hardware time.
 * At the moment we call this by hand right away from taprio, but in the future
 * it will be useful to create a mechanism for drivers to notify taprio of the
 * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
 * This is left as TODO.
 */
static void taprio_offload_config_changed(struct taprio_sched *q)
{}

/* Presumably expands a traffic-class bitmask into the corresponding TX
 * queue bitmask for @dev (body elided).
 */
static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
{}

/* Presumably translates the software @sched into the driver-facing
 * @offload descriptor, respecting @caps.
 */
static void taprio_sched_to_offload(struct net_device *dev,
				    struct sched_gate_list *sched,
				    struct tc_taprio_qopt_offload *offload,
				    const struct tc_taprio_caps *caps)
{}

/* Presumably flips the broken/working-mqprio static keys based on this
 * instance's dequeue mode (see the static keys at the top of the file).
 */
static void taprio_detect_broken_mqprio(struct taprio_sched *q)
{}

/* Presumably undoes taprio_detect_broken_mqprio() on teardown. */
static void taprio_cleanup_broken_mqprio(struct taprio_sched *q)
{}

/* Presumably pushes @sched to the driver via ndo_setup_tc (body elided). */
static int taprio_enable_offload(struct net_device *dev,
				 struct taprio_sched *q,
				 struct sched_gate_list *sched,
				 struct netlink_ext_ack *extack)
{}

/* Presumably tells the driver to tear down the offloaded schedule. */
static int taprio_disable_offload(struct net_device *dev,
				  struct taprio_sched *q,
				  struct netlink_ext_ack *extack)
{}

/* If full offload is enabled, the only possible clockid is the net device's
 * PHC. For that reason, specifying a clockid through netlink is incorrect.
 * For txtime-assist, it is implicitly assumed that the device's PHC is kept
 * in sync with the specified clockid via a user space daemon such as phc2sys.
 * For both software taprio and txtime-assist, the clockid is used for the
 * hrtimer that advances the schedule and hence mandatory.
 */
static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
				struct netlink_ext_ack *extack)
{}

/* Presumably parses one TCA_TAPRIO_ATTR_TC_ENTRY nest, filling max_sdu[]
 * and fp[] for the TC index and tracking duplicates via *seen_tcs
 * (body elided — confirm upstream).
 */
static int taprio_parse_tc_entry(struct Qdisc *sch,
				 struct nlattr *opt,
				 u32 max_sdu[TC_QOPT_MAX_QUEUE],
				 u32 fp[TC_QOPT_MAX_QUEUE],
				 unsigned long *seen_tcs,
				 struct netlink_ext_ack *extack)
{}

/* Presumably iterates all TC-entry nests in @opt via
 * taprio_parse_tc_entry().
 */
static int taprio_parse_tc_entries(struct Qdisc *sch,
				   struct nlattr *opt,
				   struct netlink_ext_ack *extack)
{}

/* Presumably compares @mqprio against @dev's current TC setup; likely
 * returns 0 on match (body elided — confirm upstream).
 */
static int taprio_mqprio_cmp(const struct net_device *dev,
			     const struct tc_mqprio_qopt *mqprio)
{}

/* .change qdisc op — presumably the main configuration entry point:
 * parses netlink options, builds the new schedule and installs it
 * (software, txtime-assist or full offload). Body elided.
 */
static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{}

/* NOTE(review): bodies in this section are elided ("{}"); comments are
 * inferred from the standard Qdisc_ops/Qdisc_class_ops contracts and the
 * function names — confirm against the full source.
 */

/* .reset qdisc op. */
static void taprio_reset(struct Qdisc *sch)
{}

/* .destroy qdisc op — presumably disables offload, frees schedules and
 * child qdiscs.
 */
static void taprio_destroy(struct Qdisc *sch)
{}

/* .init qdisc op — presumably allocates per-TXQ children and registers on
 * taprio_list, then applies @opt via taprio_change().
 */
static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{}

/* .attach qdisc op. */
static void taprio_attach(struct Qdisc *sch)
{}

/* Presumably maps class id @cl to its netdev TX queue. */
static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{}

/* .graft class op — presumably swaps the child qdisc of class @cl. */
static int taprio_graft(struct Qdisc *sch, unsigned long cl,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{}

/* Presumably dumps one gate-control entry to @msg (netlink). */
static int dump_entry(struct sk_buff *msg,
		      const struct sched_entry *entry)
{}

/* Presumably dumps a whole schedule (base/cycle time + entries) to @msg. */
static int dump_schedule(struct sk_buff *msg,
			 const struct sched_gate_list *root)
{}

/* Presumably dumps the per-TC entries (max-sdu etc.) to @skb. */
static int taprio_dump_tc_entries(struct sk_buff *skb,
				  struct taprio_sched *q,
				  struct sched_gate_list *sched)
{}

/* Presumably emits one u64 stat attribute, likely skipping values equal to
 * the TAPRIO_STAT_NOT_SET sentinel (body elided — confirm upstream).
 */
static int taprio_put_stat(struct sk_buff *skb, u64 val, u16 attrtype)
{}

/* Presumably dumps offload-provided extended statistics. */
static int taprio_dump_xstats(struct Qdisc *sch, struct gnet_dump *d,
			      struct tc_taprio_qopt_offload *offload,
			      struct tc_taprio_qopt_stats *stats)
{}

/* .dump_stats qdisc op. */
static int taprio_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{}

/* .dump qdisc op — presumably serializes the full configuration. */
static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{}

/* .leaf class op. */
static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{}

/* .find class op — presumably maps @classid to a queue-based class id. */
static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
{}

/* .dump class op. */
static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{}

/* .dump class stats op; sparse annotations indicate d->lock is dropped and
 * re-taken inside.
 */
static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{}

/* .walk class op — presumably iterates all per-TXQ classes. */
static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{}

/* .select_queue class op. */
static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{}

/* Ops tables and device notifier block.
 * NOTE(review): the initializers and the MODULE_ALIAS_NET_SCH() argument
 * were elided in this extraction ("=;" / empty parens are not valid C);
 * upstream wires the taprio_* functions above into these structures.
 */
static const struct Qdisc_class_ops taprio_class_ops =;

static struct Qdisc_ops taprio_qdisc_ops __read_mostly =;
MODULE_ALIAS_NET_SCH();

static struct notifier_block taprio_device_notifier =;

static int __init taprio_module_init(void)
{}

static void __exit taprio_module_exit(void)
{}

module_init();
module_exit(taprio_module_exit);
MODULE_LICENSE();
MODULE_DESCRIPTION();