// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

static LIST_HEAD(taprio_list);
static DEFINE_SPINLOCK(taprio_list_lock);

#define TAPRIO_ALL_GATES_OPEN -1

struct sched_entry {
	struct list_head list;

	/* The instant that this entry "closes" and the next one
	 * should open, the qdisc will make some effort so that no
	 * packet leaves after this time.
	 */
	ktime_t close_time;
	atomic_t budget;
	int index;
	u32 gate_mask;
	u32 interval;
	u8 command;
};

struct sched_gate_list {
	struct rcu_head rcu;
	struct list_head entries;
	size_t num_entries;
	ktime_t cycle_close_time;
	s64 cycle_time;
	s64 cycle_time_extension;
	s64 base_time;
};

struct taprio_sched {
	struct Qdisc **qdiscs;
	struct Qdisc *root;
	int clockid;
	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
				    * speeds it's sub-nanoseconds per byte
				    */

	/* Protects the update side of the RCU protected current_entry */
	spinlock_t current_entry_lock;
	struct sched_entry __rcu *current_entry;
	struct sched_gate_list __rcu *oper_sched;
	struct sched_gate_list __rcu *admin_sched;
	ktime_t (*get_time)(void);
	struct hrtimer advance_timer;
	struct list_head taprio_list;
};

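/* Why picoseconds: an illustrative calculation (numbers assumed, not
 * from this file). At 10 Gbps one byte takes 8 / 10^10 s = 0.8 ns,
 * which truncates to zero in whole nanoseconds; expressed in
 * picoseconds it is a usable 800 ps/byte.
 */
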
static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
	if (!sched)
		return KTIME_MAX;

	return ns_to_ktime(sched->base_time);
}

static void taprio_free_sched_cb(struct rcu_head *head)
{
	struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
	struct sched_entry *entry, *n;

	list_for_each_entry_safe(entry, n, &sched->entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	kfree(sched);
}

static void switch_schedules(struct taprio_sched *q,
			     struct sched_gate_list **admin,
			     struct sched_gate_list **oper)
{
	rcu_assign_pointer(q->oper_sched, *admin);
	rcu_assign_pointer(q->admin_sched, NULL);

	if (*oper)
		call_rcu(&(*oper)->rcu, taprio_free_sched_cb);

	*oper = *admin;
	*admin = NULL;
}

static ktime_t get_cycle_time(struct sched_gate_list *sched)
{
	struct sched_entry *entry;
	ktime_t cycle = 0;

	/* A cycle_time set explicitly by the user takes precedence */
	if (sched->cycle_time != 0)
		return sched->cycle_time;

	list_for_each_entry(entry, &sched->entries, list)
		cycle = ktime_add_ns(cycle, entry->interval);

	sched->cycle_time = cycle;

	return cycle;
}

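/* Worked example for get_cycle_time() (values assumed): a schedule with
 * three entries of 300 us, 300 us and 400 us and no explicit cycle-time
 * yields a computed cycle_time of 1000000 ns (1 ms).
 */
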
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct Qdisc *child;
	int queue;

	queue = skb_get_queue_mapping(skb);

	child = q->qdiscs[queue];
	if (unlikely(!child))
		return qdisc_drop(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return qdisc_enqueue(skb, child, to_free);
}

static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_entry *entry;
	struct sk_buff *skb;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
	rcu_read_unlock();

	if (!gate_mask)
		return NULL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		int prio;
		u8 tc;

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc)))
			continue;

		return skb;
	}

	return NULL;
}

static inline int length_to_duration(struct taprio_sched *q, int len)
{
	/* dividing by 1000 converts picoseconds to nanoseconds */
	return div_u64(len * atomic64_read(&q->picos_per_byte), 1000);
}

static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
{
	/* budget = how many bytes fit in this entry's interval */
	atomic_set(&entry->budget,
		   div64_u64((u64)entry->interval * 1000,
			     atomic64_read(&q->picos_per_byte)));
}

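/* Worked example for the two helpers above, assuming a 1 Gbps link,
 * i.e. picos_per_byte = 8 * 10^12 / 10^9 = 8000:
 *
 *   length_to_duration(): a 1500 byte frame occupies the wire for
 *   1500 * 8000 / 1000 = 12000 ns (12 us).
 *
 *   taprio_set_budget(): a 100000 ns (100 us) entry allows
 *   100000 * 1000 / 8000 = 12500 bytes to be transmitted.
 */
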
static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb = NULL;
	struct sched_entry *entry;
	u32 gate_mask;
	int i;

	if (atomic64_read(&q->picos_per_byte) == -1) {
		WARN_ONCE(1, "taprio: dequeue() called with unknown picos per byte.");
		return NULL;
	}

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	/* if there's no entry, it means that the schedule didn't
	 * start yet, so force all gates to be open, this is in
	 * accordance to IEEE 802.1Qbv-2015 Section 8.6.9.4.5
	 */
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;

	if (!gate_mask)
		goto done;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		ktime_t guard;
		int prio;
		int len;
		u8 tc;

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc)))
			continue;

		len = qdisc_pkt_len(skb);
		guard = ktime_add_ns(q->get_time(),
				     length_to_duration(q, len));

		/* In the case that there's no gate entry, there's no
		 * guard band ...
		 */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    ktime_after(guard, entry->close_time))
			continue;

		/* ... and no budget. */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    atomic_sub_return(len, &entry->budget) < 0)
			continue;

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			goto done;

		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		goto done;
	}

	/* Nothing eligible was found; don't return a merely peeked skb */
	skb = NULL;

done:
	rcu_read_unlock();

	return skb;
}

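/* Guard band example (illustrative numbers): with the 1 Gbps figures
 * above, a 1500 byte frame needs 12 us of wire time. If the current
 * entry closes 10 us from now, 'guard' lands after close_time, so the
 * frame stays queued until a later gate that can fit it completely.
 */
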
static bool should_restart_cycle(const struct sched_gate_list *oper,
				 const struct sched_entry *entry)
{
	if (list_is_last(&entry->list, &oper->entries))
		return true;

	if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
		return true;

	return false;
}

static bool should_change_schedules(const struct sched_gate_list *admin,
				    const struct sched_gate_list *oper,
				    ktime_t close_time)
{
	ktime_t next_base_time, extension_time;

	if (!admin)
		return false;

	next_base_time = sched_base_time(admin);

	/* This is the simple case, the close_time would fall after
	 * the next schedule base_time.
	 */
	if (ktime_compare(next_base_time, close_time) <= 0)
		return true;

	/* This is the cycle_time_extension case, if the close_time
	 * plus the amount that can be extended would fall after the
	 * next schedule base_time, we can extend the current schedule
	 * for that amount.
	 */
	extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);

	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
	 * how precisely the extension should be made. So after
	 * conformance testing, this logic may change.
	 */
	if (ktime_compare(next_base_time, extension_time) <= 0)
		return true;

	return false;
}

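/* Timeline sketch (assumed values): with the admin schedule's base_time
 * at 990 us, a close_time of 1000 us falls past it, so we switch right
 * away. A close_time of 980 us with cycle_time_extension = 15 us also
 * switches, because the entry may be stretched to 995 us >= 990 us.
 */
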
static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
	struct taprio_sched *q = container_of(timer, struct taprio_sched,
					      advance_timer);
	struct sched_gate_list *oper, *admin;
	struct sched_entry *entry, *next;
	struct Qdisc *sch = q->root;
	ktime_t close_time;

	spin_lock(&q->current_entry_lock);
	entry = rcu_dereference_protected(q->current_entry,
					  lockdep_is_held(&q->current_entry_lock));
	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	if (!oper)
		switch_schedules(q, &admin, &oper);

	/* This can happen in two cases: 1. this is the very first run
	 * of this function (i.e. we weren't running any schedule
	 * previously); 2. The previous schedule just ended. The first
	 * entry of all schedules are pre-calculated during the
	 * schedule initialization.
	 */
	if (unlikely(!entry || entry->close_time == oper->base_time)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		close_time = next->close_time;
		goto first_run;
	}

	if (should_restart_cycle(oper, entry)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
						      oper->cycle_time);
	} else {
		next = list_next_entry(entry, list);
	}

	close_time = ktime_add_ns(entry->close_time, next->interval);
	close_time = min_t(ktime_t, close_time, oper->cycle_close_time);

	if (should_change_schedules(admin, oper, close_time)) {
		/* Set things so the next time this runs, the new
		 * schedule runs.
		 */
		close_time = sched_base_time(admin);
		switch_schedules(q, &admin, &oper);
	}

	next->close_time = close_time;
	taprio_set_budget(q, next);

first_run:
	rcu_assign_pointer(q->current_entry, next);
	spin_unlock(&q->current_entry_lock);

	hrtimer_set_expires(&q->advance_timer, close_time);

	rcu_read_lock();
	__netif_schedule(sch);
	rcu_read_unlock();

	return HRTIMER_RESTART;
}

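/* One timer tick, sketched with assumed values: if the running entry
 * closes at T + 300 us, the hrtimer fires there, selects the next
 * entry, extends its close_time by that entry's interval (clamped to
 * cycle_close_time), refills its byte budget, publishes it with RCU
 * and re-arms the timer at the new close_time.
 */
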
static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY_INDEX]	   = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_CMD]	   = { .type = NLA_U8 },
	[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
};

static const struct nla_policy entry_list_policy[TCA_TAPRIO_SCHED_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY] = { .type = NLA_NESTED },
};

static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
	[TCA_TAPRIO_ATTR_PRIOMAP]		      = {
		.len = sizeof(struct tc_mqprio_qopt)
	},
	[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]	      = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]	      = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]	      = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]		      = { .type = NLA_S32 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]	      = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
};

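/* A configuration sketch exercising the attributes above via tc
 * (interface, times and mapping are made up for illustration):
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *         num_tc 3 \
 *         map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *         queues 1@0 1@1 2@2 \
 *         base-time 1528743495910289987 \
 *         sched-entry S 01 300000 \
 *         sched-entry S 02 300000 \
 *         sched-entry S 04 400000 \
 *         clockid CLOCK_TAI
 *
 * Each "sched-entry S <gate_mask> <interval>" becomes one sched_entry:
 * command, gate bitmask and interval in nanoseconds.
 */
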
static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
			    struct netlink_ext_ack *extack)
{
	u32 interval = 0;

	if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
		entry->command = nla_get_u8(
			tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
		entry->gate_mask = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
		interval = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

	if (interval == 0) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	return 0;
}

static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry,
			     int index, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
					  entry_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_sched_entry(tb, entry, extack);
}

static int parse_sched_list(struct nlattr *list,
			    struct sched_gate_list *sched,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list)
		return -EINVAL;

	nla_for_each_nested(n, list, rem) {
		struct sched_entry *entry;

		if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			return -ENOMEM;
		}

		err = parse_sched_entry(n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			return err;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;
}

static int parse_taprio_schedule(struct nlattr **tb,
				 struct sched_gate_list *new,
				 struct netlink_ext_ack *extack)
{
	int err = 0;

	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
		NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
		return -ENOTSUPP;
	}

	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
		new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
		new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
		new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
		err = parse_sched_list(
			tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], new, extack);
	if (err < 0)
		return err;

	return 0;
}

static int taprio_parse_mqprio_opt(struct net_device *dev,
				   struct tc_mqprio_qopt *qopt,
				   struct netlink_ext_ack *extack)
{
	int i, j;

	if (!qopt && !dev->num_tc) {
		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
		return -EINVAL;
	}

	/* If num_tc is already set, it means that the user already
	 * configured the mqprio part
	 */
	if (dev->num_tc)
		return 0;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
		return -EINVAL;
	}

	/* taprio imposes that traffic classes map 1:n to tx queues */
	if (qopt->num_tc > dev->num_tx_queues) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
		return -EINVAL;
	}

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc) {
			NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
			return -EINVAL;
		}
	}

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is in tx range; a 'last' equal
		 * to real_num_tx_queues means the final queue is in use.
		 */
		if (qopt->offset[i] >= dev->num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues) {
			NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
			return -EINVAL;
		}

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j]) {
				NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
				return -EINVAL;
			}
		}
	}

	return 0;
}

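/* Example of the overlap check (assumed values): with num_tc = 2,
 * "queues 2@0 2@1" maps tc0 to queues 0-1 and tc1 to queues 1-2;
 * last(tc0) = 0 + 2 = 2 > offset(tc1) = 1, so it is rejected, while
 * "2@0 2@2" (queues 0-1 and 2-3) passes.
 */
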
static int taprio_get_start_time(struct Qdisc *sch,
				 struct sched_gate_list *sched,
				 ktime_t *start)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t now, base, cycle;
	s64 n;

	base = sched_base_time(sched);
	now = q->get_time();

	if (ktime_after(base, now)) {
		*start = base;
		return 0;
	}

	cycle = get_cycle_time(sched);

	/* The qdisc is expected to have at least one sched_entry. Moreover,
	 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
	 * something went really wrong. In that case, we should warn about this
	 * inconsistent state and return error.
	 */
	if (WARN_ON(!cycle))
		return -EFAULT;

	/* Schedule the start time for the beginning of the next
	 * cycle.
	 */
	n = div64_s64(ktime_sub_ns(now, base), cycle);
	*start = ktime_add_ns(base, (n + 1) * cycle);
	return 0;
}

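/* Worked example (assumed values): base = 0, cycle = 1 ms and
 * now = 10.35 ms give n = 10, so *start = base + 11 * cycle = 11 ms,
 * the beginning of the next full cycle.
 */
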
static void setup_first_close_time(struct taprio_sched *q,
				   struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *first;
	ktime_t cycle;

	first = list_first_entry(&sched->entries,
				 struct sched_entry, list);

	cycle = get_cycle_time(sched);

	/* FIXME: find a better place to do this */
	sched->cycle_close_time = ktime_add_ns(base, cycle);

	first->close_time = ktime_add_ns(base, first->interval);
	taprio_set_budget(q, first);
	rcu_assign_pointer(q->current_entry, NULL);
}

static void taprio_start_sched(struct Qdisc *sch,
			       ktime_t start, struct sched_gate_list *new)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t expires;

	expires = hrtimer_get_expires(&q->advance_timer);
	if (expires == 0)
		expires = KTIME_MAX;

	/* If the new schedule starts before the next expiration, we
	 * reprogram it to the earliest one, so we change the admin
	 * schedule to the operational one at the right time.
	 */
	start = min_t(ktime_t, start, expires);

	hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}

static void taprio_set_picos_per_byte(struct net_device *dev,
				      struct taprio_sched *q)
{
	struct ethtool_link_ksettings ecmd;
	int picos_per_byte = -1;

	if (!__ethtool_get_link_ksettings(dev, &ecmd) &&
	    ecmd.base.speed != SPEED_UNKNOWN)
		picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
					   ecmd.base.speed * 1000 * 1000);

	atomic64_set(&q->picos_per_byte, picos_per_byte);
	netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
		   dev->name, (long long)atomic64_read(&q->picos_per_byte),
		   ecmd.base.speed);
}

static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
			       void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net_device *qdev;
	struct taprio_sched *q;
	bool found = false;

	ASSERT_RTNL();

	if (event != NETDEV_UP && event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	spin_lock(&taprio_list_lock);
	list_for_each_entry(q, &taprio_list, taprio_list) {
		qdev = qdisc_dev(q->root);
		if (qdev == dev) {
			found = true;
			break;
		}
	}
	spin_unlock(&taprio_list_lock);

	if (found)
		taprio_set_picos_per_byte(dev, q);

	return NOTIFY_DONE;
}

static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
	struct sched_gate_list *oper, *admin, *new_admin;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mqprio_qopt *mqprio = NULL;
	unsigned long flags;
	ktime_t start;
	int i, err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
					  taprio_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
		mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);

	err = taprio_parse_mqprio_opt(dev, mqprio, extack);
	if (err < 0)
		return err;

	new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
	if (!new_admin) {
		NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&new_admin->entries);

	rcu_read_lock();
	oper = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);
	rcu_read_unlock();

	if (mqprio && (oper || admin)) {
		NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
		err = -ENOTSUPP;
		goto free_sched;
	}

	err = parse_taprio_schedule(tb, new_admin, extack);
	if (err < 0)
		goto free_sched;

	if (new_admin->num_entries == 0) {
		NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
		err = -EINVAL;
		goto free_sched;
	}

	if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
		int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);

		/* We only support static clockids and we don't allow
		 * for it to be modified after the first init.
		 */
		if (clockid < 0 ||
		    (q->clockid != -1 && q->clockid != clockid)) {
			NL_SET_ERR_MSG(extack, "Changing the 'clockid' of a running schedule is not supported");
			err = -ENOTSUPP;
			goto free_sched;
		}

		q->clockid = clockid;
	}

	if (q->clockid == -1 && !tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
		NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
		err = -EINVAL;
		goto free_sched;
	}

	taprio_set_picos_per_byte(dev, q);

	/* Protects against enqueue()/dequeue() */
	spin_lock_bh(qdisc_lock(sch));

	if (!hrtimer_active(&q->advance_timer)) {
		hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
		q->advance_timer.function = advance_sched;
	}

	if (mqprio) {
		netdev_set_num_tc(dev, mqprio->num_tc);
		for (i = 0; i < mqprio->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    mqprio->count[i],
					    mqprio->offset[i]);

		/* Always use supplied priority mappings */
		for (i = 0; i < TC_BITMASK + 1; i++)
			netdev_set_prio_tc_map(dev, i,
					       mqprio->prio_tc_map[i]);
	}

	switch (q->clockid) {
	case CLOCK_REALTIME:
		q->get_time = ktime_get_real;
		break;
	case CLOCK_MONOTONIC:
		q->get_time = ktime_get;
		break;
	case CLOCK_BOOTTIME:
		q->get_time = ktime_get_boottime;
		break;
	case CLOCK_TAI:
		q->get_time = ktime_get_clocktai;
		break;
	default:
		NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
		err = -EINVAL;
		goto unlock;
	}

	err = taprio_get_start_time(sch, new_admin, &start);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Internal error: failed get start time");
		goto unlock;
	}

	setup_first_close_time(q, new_admin, start);

	/* Protects against advance_sched() */
	spin_lock_irqsave(&q->current_entry_lock, flags);

	taprio_start_sched(sch, start, new_admin);

	rcu_assign_pointer(q->admin_sched, new_admin);
	if (admin)
		call_rcu(&admin->rcu, taprio_free_sched_cb);
	new_admin = NULL;

	spin_unlock_irqrestore(&q->current_entry_lock, flags);

	err = 0;

unlock:
	spin_unlock_bh(qdisc_lock(sch));

free_sched:
	kfree(new_admin);

	return err;
}

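/* Usage note (illustrative): running "tc qdisc replace ... taprio ..."
 * again with a new base-time and entry list installs the new schedule
 * as admin_sched; advance_sched() then promotes it to oper_sched at
 * that base-time instead of disrupting the cycle in flight.
 */
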
static void taprio_destroy(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	unsigned int i;

	spin_lock(&taprio_list_lock);
	list_del(&q->taprio_list);
	spin_unlock(&taprio_list_lock);

	hrtimer_cancel(&q->advance_timer);

	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
			qdisc_put(q->qdiscs[i]);

		kfree(q->qdiscs);
	}
	q->qdiscs = NULL;

	netdev_set_num_tc(dev, 0);

	if (q->oper_sched)
		call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);

	if (q->admin_sched)
		call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb);
}

static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int i;

	spin_lock_init(&q->current_entry_lock);

	hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
	q->advance_timer.function = advance_sched;

	q->root = sch;

	/* We only support static clockids. Use an invalid value as default
	 * and get the valid one on taprio_change().
	 */
	q->clockid = -1;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdisc, attachment can't fail */
	q->qdiscs = kcalloc(dev->num_tx_queues,
			    sizeof(q->qdiscs[0]),
			    GFP_KERNEL);

	if (!q->qdiscs)
		return -ENOMEM;

	if (!opt)
		return -EINVAL;

	spin_lock(&taprio_list_lock);
	list_add(&q->taprio_list, &taprio_list);
	spin_unlock(&taprio_list_lock);

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		struct Qdisc *qdisc;

		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;

		if (i < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);

		q->qdiscs[i] = qdisc;
	}

	return taprio_change(sch, opt, extack);
}

static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;

	return netdev_get_tx_queue(dev, ntx);
}

static int taprio_graft(struct Qdisc *sch, unsigned long cl,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = q->qdiscs[cl - 1];
	q->qdiscs[cl - 1] = new;

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

static int dump_entry(struct sk_buff *msg,
		      const struct sched_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
			entry->gate_mask))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
			entry->interval))
		goto nla_put_failure;

	return nla_nest_end(msg, item);

nla_put_failure:
	nla_nest_cancel(msg, item);
	return -1;
}

static int dump_schedule(struct sk_buff *msg,
			 const struct sched_gate_list *root)
{
	struct nlattr *entry_list;
	struct sched_entry *entry;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
			root->base_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
			root->cycle_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
			root->cycle_time_extension, TCA_TAPRIO_PAD))
		return -1;

	entry_list = nla_nest_start_noflag(msg,
					   TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
	if (!entry_list)
		goto error_nest;

	list_for_each_entry(entry, &root->entries, list) {
		if (dump_entry(msg, entry) < 0)
			goto error_nest;
	}

	nla_nest_end(msg, entry_list);
	return 0;

error_nest:
	nla_nest_cancel(msg, entry_list);
	return -1;
}

static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_gate_list *oper, *admin;
	struct tc_mqprio_qopt opt = { 0 };
	struct nlattr *nest, *sched_nest;
	unsigned int i;

	rcu_read_lock();
	oper = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto start_error;

	if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
		goto options_error;

	if (nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
		goto options_error;

	if (oper && dump_schedule(skb, oper))
		goto options_error;

	if (!admin)
		goto done;

	sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
	if (!sched_nest)
		goto options_error;

	if (dump_schedule(skb, admin))
		goto admin_error;

	nla_nest_end(skb, sched_nest);

done:
	rcu_read_unlock();

	return nla_nest_end(skb, nest);

admin_error:
	nla_nest_cancel(skb, sched_nest);

options_error:
	nla_nest_cancel(skb, nest);

start_error:
	rcu_read_unlock();
	return -ENOSPC;
}

static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!taprio_queue_get(sch, ntx))
		return 0;

	return ntx;
}

static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;

	return 0;
}

static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
	    qdisc_qstats_copy(d, sch) < 0)
		return -1;

	return 0;
}

static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops taprio_class_ops = {
	.graft		= taprio_graft,
	.leaf		= taprio_leaf,
	.find		= taprio_find,
	.walk		= taprio_walk,
	.dump		= taprio_dump_class,
	.dump_stats	= taprio_dump_class_stats,
	.select_queue	= taprio_select_queue,
};

static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
	.cl_ops		= &taprio_class_ops,
	.id		= "taprio",
	.priv_size	= sizeof(struct taprio_sched),
	.init		= taprio_init,
	.change		= taprio_change,
	.destroy	= taprio_destroy,
	.peek		= taprio_peek,
	.dequeue	= taprio_dequeue,
	.enqueue	= taprio_enqueue,
	.dump		= taprio_dump,
	.owner		= THIS_MODULE,
};

static struct notifier_block taprio_device_notifier = {
	.notifier_call = taprio_dev_notifier,
};

static int __init taprio_module_init(void)
{
	int err = register_netdevice_notifier(&taprio_device_notifier);

	if (err)
		return err;

	return register_qdisc(&taprio_qdisc_ops);
}

static void __exit taprio_module_exit(void)
{
	unregister_qdisc(&taprio_qdisc_ops);
	unregister_netdevice_notifier(&taprio_device_notifier);
}

module_init(taprio_module_init);
module_exit(taprio_module_exit);
MODULE_LICENSE("GPL");