// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c        Time Aware Priority Scheduler
 *
 * Authors:     Vinicius Costa Gomes <vinicius.gomes@intel.com>
 *
 */
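
/* Example configuration (illustrative; see the tc-taprio(8) man page for
 * the authoritative syntax; the interface name and queue layout below are
 * assumptions made for the sake of the example):
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *             num_tc 3 \
 *             map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *             queues 1@0 1@1 2@2 \
 *             base-time 1528743495910289987 \
 *             sched-entry S 01 300000 \
 *             sched-entry S 02 300000 \
 *             sched-entry S 04 400000 \
 *             clockid CLOCK_TAI
 *
 * "map" and "queues" follow mqprio semantics; each
 * "sched-entry S <gatemask> <interval-ns>" becomes one sched_entry below.
 */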
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/tcp.h>

static LIST_HEAD(taprio_list);
static DEFINE_SPINLOCK(taprio_list_lock);

#define TAPRIO_ALL_GATES_OPEN -1

#define FLAGS_VALID(flags) (!((flags) & ~TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST))
#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)

struct sched_entry {
        struct list_head list;

        /* The instant that this entry "closes" and the next one
         * should open; the qdisc will make some effort so that no
         * packet leaves after this time.
         */
        ktime_t close_time;
        ktime_t next_txtime;
        atomic_t budget;
        int index;
        u32 gate_mask;
        u32 interval;
        u8 command;
};

struct sched_gate_list {
        struct rcu_head rcu;
        struct list_head entries;
        size_t num_entries;
        ktime_t cycle_close_time;
        s64 cycle_time;
        s64 cycle_time_extension;
        s64 base_time;
};

struct taprio_sched {
        struct Qdisc **qdiscs;
        struct Qdisc *root;
        u32 flags;
        enum tk_offsets tk_offset;
        int clockid;
        atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
                                    * speeds it's sub-nanosecond per byte
                                    */

        /* Protects the update side of the RCU protected current_entry */
        spinlock_t current_entry_lock;
        struct sched_entry __rcu *current_entry;
        struct sched_gate_list __rcu *oper_sched;
        struct sched_gate_list __rcu *admin_sched;
        struct hrtimer advance_timer;
        struct list_head taprio_list;
        int txtime_delay;
};

static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
        if (!sched)
                return KTIME_MAX;

        return ns_to_ktime(sched->base_time);
}

static ktime_t taprio_get_time(struct taprio_sched *q)
{
        ktime_t mono = ktime_get();

        switch (q->tk_offset) {
        case TK_OFFS_MAX:
                return mono;
        default:
                return ktime_mono_to_any(mono, q->tk_offset);
        }
}

static void taprio_free_sched_cb(struct rcu_head *head)
{
        struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
        struct sched_entry *entry, *n;

        if (!sched)
                return;

        list_for_each_entry_safe(entry, n, &sched->entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        kfree(sched);
}

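/* Promote the admin schedule to being the operational one; the old
 * operational schedule (if any) is freed after an RCU grace period.
 */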
static void switch_schedules(struct taprio_sched *q,
                             struct sched_gate_list **admin,
                             struct sched_gate_list **oper)
{
        rcu_assign_pointer(q->oper_sched, *admin);
        rcu_assign_pointer(q->admin_sched, NULL);

        if (*oper)
                call_rcu(&(*oper)->rcu, taprio_free_sched_cb);

        *oper = *admin;
        *admin = NULL;
}

/* Get how much time has already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
        ktime_t time_since_sched_start;
        s32 time_elapsed;

        time_since_sched_start = ktime_sub(time, sched->base_time);
        div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);

        return time_elapsed;
}

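/* An interval that starts at @intv_start ends at the entry's nominal end,
 * unless that would run past the end of the current cycle; the cycle end
 * itself may be pushed back to the admin schedule's base_time when it
 * falls inside the cycle_time_extension window.
 */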
static ktime_t get_interval_end_time(struct sched_gate_list *sched,
                                     struct sched_gate_list *admin,
                                     struct sched_entry *entry,
                                     ktime_t intv_start)
{
        s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
        ktime_t intv_end, cycle_ext_end, cycle_end;

        cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
        intv_end = ktime_add_ns(intv_start, entry->interval);
        cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);

        if (ktime_before(intv_end, cycle_end))
                return intv_end;
        else if (admin && admin != sched &&
                 ktime_after(admin->base_time, cycle_end) &&
                 ktime_before(admin->base_time, cycle_ext_end))
                return admin->base_time;
        else
                return cycle_end;
}

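/* Transmission time, in nanoseconds, of a @len bytes long packet at the
 * current link speed.
 */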
static int length_to_duration(struct taprio_sched *q, int len)
{
        return div_u64(len * atomic64_read(&q->picos_per_byte), 1000);
}

/* Returns the entry corresponding to the next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
                                                  struct Qdisc *sch,
                                                  struct sched_gate_list *sched,
                                                  struct sched_gate_list *admin,
                                                  ktime_t time,
                                                  ktime_t *interval_start,
                                                  ktime_t *interval_end,
                                                  bool validate_interval)
{
        ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
        ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
        struct sched_entry *entry = NULL, *entry_found = NULL;
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        bool entry_available = false;
        s32 cycle_elapsed;
        int tc, n;

        tc = netdev_get_prio_tc_map(dev, skb->priority);
        packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));

        *interval_start = 0;
        *interval_end = 0;

        if (!sched)
                return NULL;

        cycle = sched->cycle_time;
        cycle_elapsed = get_cycle_time_elapsed(sched, time);
        curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
        cycle_end = ktime_add_ns(curr_intv_end, cycle);

        list_for_each_entry(entry, &sched->entries, list) {
                curr_intv_start = curr_intv_end;
                curr_intv_end = get_interval_end_time(sched, admin, entry,
                                                      curr_intv_start);

                if (ktime_after(curr_intv_start, cycle_end))
                        break;

                if (!(entry->gate_mask & BIT(tc)) ||
                    packet_transmit_time > entry->interval)
                        continue;

                txtime = entry->next_txtime;

                if (ktime_before(txtime, time) || validate_interval) {
                        transmit_end_time = ktime_add_ns(time, packet_transmit_time);
                        if ((ktime_before(curr_intv_start, time) &&
                             ktime_before(transmit_end_time, curr_intv_end)) ||
                            (ktime_after(curr_intv_start, time) && !validate_interval)) {
                                entry_found = entry;
                                *interval_start = curr_intv_start;
                                *interval_end = curr_intv_end;
                                break;
                        } else if (!entry_available && !validate_interval) {
                                /* Here, we are just trying to find out the
                                 * first available interval in the next cycle.
                                 */
                                entry_available = true;
                                entry_found = entry;
                                *interval_start = ktime_add_ns(curr_intv_start, cycle);
                                *interval_end = ktime_add_ns(curr_intv_end, cycle);
                        }
                } else if (ktime_before(txtime, earliest_txtime) &&
                           !entry_available) {
                        earliest_txtime = txtime;
                        entry_found = entry;
                        n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
                        *interval_start = ktime_add(curr_intv_start, n * cycle);
                        *interval_end = ktime_add(curr_intv_end, n * cycle);
                }
        }

        return entry_found;
}

static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct sched_gate_list *sched, *admin;
        ktime_t interval_start, interval_end;
        struct sched_entry *entry;

        rcu_read_lock();
        sched = rcu_dereference(q->oper_sched);
        admin = rcu_dereference(q->admin_sched);

        entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
                                       &interval_start, &interval_end, true);
        rcu_read_unlock();

        return entry;
}

/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
        unsigned int offset = skb_network_offset(skb);
        const struct ipv6hdr *ipv6h;
        const struct iphdr *iph;
        struct ipv6hdr _ipv6h;

        ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
        if (!ipv6h)
                return 0;

        if (ipv6h->version == 4) {
                iph = (struct iphdr *)ipv6h;
                offset += iph->ihl * 4;

                /* special-case 6in4 tunnelling, as that is a common way to get
                 * v6 connectivity in the home
                 */
                if (iph->protocol == IPPROTO_IPV6) {
                        ipv6h = skb_header_pointer(skb, offset,
                                                   sizeof(_ipv6h), &_ipv6h);

                        if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
                                return 0;
                } else if (iph->protocol != IPPROTO_TCP) {
                        return 0;
                }
        } else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
                return 0;
        }

        return ktime_mono_to_any(skb->skb_mstamp_ns, q->tk_offset);
}

/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past,
 *    a. The gate for the traffic class is currently open and the packet can
 *       be transmitted before it closes: schedule the packet right away.
 *    b. If the gate corresponding to the traffic class is going to open later
 *       in the cycle, set the txtime of the packet to the interval start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully. So, schedule the packet in the next open window.
 */
static ktime_t get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{
        ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
        struct taprio_sched *q = qdisc_priv(sch);
        struct sched_gate_list *sched, *admin;
        ktime_t minimum_time, now, txtime;
        int len, packet_transmit_time;
        struct sched_entry *entry;
        bool sched_changed;

        now = taprio_get_time(q);
        minimum_time = ktime_add_ns(now, q->txtime_delay);

        tcp_tstamp = get_tcp_tstamp(q, skb);
        minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);

        rcu_read_lock();
        admin = rcu_dereference(q->admin_sched);
        sched = rcu_dereference(q->oper_sched);
        if (admin && ktime_after(minimum_time, admin->base_time))
                switch_schedules(q, &admin, &sched);

        /* Until the schedule starts, all the queues are open */
        if (!sched || ktime_before(minimum_time, sched->base_time)) {
                txtime = minimum_time;
                goto done;
        }

        len = qdisc_pkt_len(skb);
        packet_transmit_time = length_to_duration(q, len);

        do {
                sched_changed = false;

                entry = find_entry_to_transmit(skb, sch, sched, admin,
                                               minimum_time,
                                               &interval_start, &interval_end,
                                               false);
                if (!entry) {
                        txtime = 0;
                        goto done;
                }

                txtime = entry->next_txtime;
                txtime = max_t(ktime_t, txtime, minimum_time);
                txtime = max_t(ktime_t, txtime, interval_start);

                if (admin && admin != sched &&
                    ktime_after(txtime, admin->base_time)) {
                        sched = admin;
                        sched_changed = true;
                        continue;
                }

                transmit_end_time = ktime_add(txtime, packet_transmit_time);
                minimum_time = transmit_end_time;

                /* Update the txtime of the current entry to the next time its
                 * interval starts.
                 */
                if (ktime_after(transmit_end_time, interval_end))
                        entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
        } while (sched_changed || ktime_after(transmit_end_time, interval_end));

        entry->next_txtime = transmit_end_time;

done:
        rcu_read_unlock();
        return txtime;
}

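/* For sockets that set SO_TXTIME the user-provided launch time is
 * validated against the gate schedule; otherwise, in txtime-assist mode,
 * a launch time is computed here. Packets that cannot make any gate are
 * dropped at enqueue time.
 */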
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                          struct sk_buff **to_free)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct Qdisc *child;
        int queue;

        queue = skb_get_queue_mapping(skb);

        child = q->qdiscs[queue];
        if (unlikely(!child))
                return qdisc_drop(skb, sch, to_free);

        if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
                if (!is_valid_interval(skb, sch))
                        return qdisc_drop(skb, sch, to_free);
        } else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
                skb->tstamp = get_packet_txtime(skb, sch);
                if (!skb->tstamp)
                        return qdisc_drop(skb, sch, to_free);
        }

        qdisc_qstats_backlog_inc(sch, skb);
        sch->q.qlen++;

        return qdisc_enqueue(skb, child, to_free);
}

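/* Return the first queued skb whose traffic class has its gate open in
 * the current entry. In txtime-assist mode gating is enforced through
 * launch times instead, so any queued skb can be offered.
 */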
static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct sched_entry *entry;
        struct sk_buff *skb;
        u32 gate_mask;
        int i;

        rcu_read_lock();
        entry = rcu_dereference(q->current_entry);
        gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
        rcu_read_unlock();

        if (!gate_mask)
                return NULL;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct Qdisc *child = q->qdiscs[i];
                int prio;
                u8 tc;

                if (unlikely(!child))
                        continue;

                skb = child->ops->peek(child);
                if (!skb)
                        continue;

                if (TXTIME_ASSIST_IS_ENABLED(q->flags))
                        return skb;

                prio = skb->priority;
                tc = netdev_get_prio_tc_map(dev, prio);

                if (!(gate_mask & BIT(tc)))
                        continue;

                return skb;
        }

        return NULL;
}

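/* Refill the entry's budget: the number of bytes that fit into its
 * interval at the current link speed (interval in ns, times 1000,
 * divided by picoseconds per byte).
 */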
static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
{
        atomic_set(&entry->budget,
                   div64_u64((u64)entry->interval * 1000,
                             atomic64_read(&q->picos_per_byte)));
}

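/* Dequeue an skb allowed by the current gate mask, making sure its
 * transmission both finishes before the entry's close_time (the guard
 * band) and fits into the entry's remaining byte budget.
 */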
static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct sk_buff *skb = NULL;
        struct sched_entry *entry;
        u32 gate_mask;
        int i;

        if (atomic64_read(&q->picos_per_byte) == -1) {
                WARN_ONCE(1, "taprio: dequeue() called with unknown picos per byte.");
                return NULL;
        }

        rcu_read_lock();
        entry = rcu_dereference(q->current_entry);
        /* if there's no entry, it means that the schedule didn't
         * start yet, so force all gates to be open, this is in
         * accordance with IEEE 802.1Qbv-2015 Section 8.6.9.4.5
         * "AdminGateStates"
         */
        gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;

        if (!gate_mask)
                goto done;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct Qdisc *child = q->qdiscs[i];
                ktime_t guard;
                int prio;
                int len;
                u8 tc;

                if (unlikely(!child))
                        continue;

                if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
                        skb = child->ops->dequeue(child);
                        if (!skb)
                                continue;
                        goto skb_found;
                }

                skb = child->ops->peek(child);
                if (!skb)
                        continue;

                prio = skb->priority;
                tc = netdev_get_prio_tc_map(dev, prio);

                if (!(gate_mask & BIT(tc)))
                        continue;

                len = qdisc_pkt_len(skb);
                guard = ktime_add_ns(taprio_get_time(q),
                                     length_to_duration(q, len));

                /* In the case that there's no gate entry, there's no
                 * guard band ...
                 */
                if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
                    ktime_after(guard, entry->close_time))
                        continue;

                /* ... and no budget. */
                if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
                    atomic_sub_return(len, &entry->budget) < 0)
                        continue;

                skb = child->ops->dequeue(child);
                if (unlikely(!skb))
                        goto done;

skb_found:
                qdisc_bstats_update(sch, skb);
                qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;

                goto done;
        }

done:
        rcu_read_unlock();

        return skb;
}

static bool should_restart_cycle(const struct sched_gate_list *oper,
                                 const struct sched_entry *entry)
{
        if (list_is_last(&entry->list, &oper->entries))
                return true;

        if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
                return true;

        return false;
}

static bool should_change_schedules(const struct sched_gate_list *admin,
                                    const struct sched_gate_list *oper,
                                    ktime_t close_time)
{
        ktime_t next_base_time, extension_time;

        if (!admin)
                return false;

        next_base_time = sched_base_time(admin);

        /* This is the simple case: the close_time would fall after
         * the next schedule's base_time.
         */
        if (ktime_compare(next_base_time, close_time) <= 0)
                return true;

        /* This is the cycle_time_extension case: if the close_time
         * plus the amount that can be extended would fall after the
         * next schedule's base_time, we can extend the current schedule
         * by that amount.
         */
        extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);

        /* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
         * how precisely the extension should be made. So after
         * conformance testing, this logic may change.
         */
        if (ktime_compare(next_base_time, extension_time) <= 0)
                return true;

        return false;
}

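/* hrtimer callback that steps the schedule: pick the next entry (or
 * restart the cycle, or switch to the admin schedule), publish it as
 * current_entry under RCU and rearm the timer for its close_time.
 */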
static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
        struct taprio_sched *q = container_of(timer, struct taprio_sched,
                                              advance_timer);
        struct sched_gate_list *oper, *admin;
        struct sched_entry *entry, *next;
        struct Qdisc *sch = q->root;
        ktime_t close_time;

        spin_lock(&q->current_entry_lock);
        entry = rcu_dereference_protected(q->current_entry,
                                          lockdep_is_held(&q->current_entry_lock));
        oper = rcu_dereference_protected(q->oper_sched,
                                         lockdep_is_held(&q->current_entry_lock));
        admin = rcu_dereference_protected(q->admin_sched,
                                          lockdep_is_held(&q->current_entry_lock));

        if (!oper)
                switch_schedules(q, &admin, &oper);

        /* This can happen in two cases: 1. this is the very first run
         * of this function (i.e. we weren't running any schedule
         * previously); 2. the previous schedule just ended. The first
         * entry of each schedule is pre-calculated during schedule
         * initialization.
         */
        if (unlikely(!entry || entry->close_time == oper->base_time)) {
                next = list_first_entry(&oper->entries, struct sched_entry,
                                        list);
                close_time = next->close_time;
                goto first_run;
        }

        if (should_restart_cycle(oper, entry)) {
                next = list_first_entry(&oper->entries, struct sched_entry,
                                        list);
                oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
                                                      oper->cycle_time);
        } else {
                next = list_next_entry(entry, list);
        }

        close_time = ktime_add_ns(entry->close_time, next->interval);
        close_time = min_t(ktime_t, close_time, oper->cycle_close_time);

        if (should_change_schedules(admin, oper, close_time)) {
                /* Set things so the next time this runs, the new
                 * schedule runs.
                 */
                close_time = sched_base_time(admin);
                switch_schedules(q, &admin, &oper);
        }

        next->close_time = close_time;
        taprio_set_budget(q, next);

first_run:
        rcu_assign_pointer(q->current_entry, next);
        spin_unlock(&q->current_entry_lock);

        hrtimer_set_expires(&q->advance_timer, close_time);

        rcu_read_lock();
        __netif_schedule(sch);
        rcu_read_unlock();

        return HRTIMER_RESTART;
}

static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
        [TCA_TAPRIO_SCHED_ENTRY_INDEX]     = { .type = NLA_U32 },
        [TCA_TAPRIO_SCHED_ENTRY_CMD]       = { .type = NLA_U8 },
        [TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
        [TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
};

static const struct nla_policy entry_list_policy[TCA_TAPRIO_SCHED_MAX + 1] = {
        [TCA_TAPRIO_SCHED_ENTRY] = { .type = NLA_NESTED },
};

static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
        [TCA_TAPRIO_ATTR_PRIOMAP]              = {
                .len = sizeof(struct tc_mqprio_qopt)
        },
        [TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]           = { .type = NLA_NESTED },
        [TCA_TAPRIO_ATTR_SCHED_BASE_TIME]            = { .type = NLA_S64 },
        [TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]         = { .type = NLA_NESTED },
        [TCA_TAPRIO_ATTR_SCHED_CLOCKID]              = { .type = NLA_S32 },
        [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           = { .type = NLA_S64 },
        [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
};

static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
                            struct netlink_ext_ack *extack)
{
        u32 interval = 0;

        if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
                entry->command = nla_get_u8(
                        tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

        if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
                entry->gate_mask = nla_get_u32(
                        tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

        if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
                interval = nla_get_u32(
                        tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

        if (interval == 0) {
                NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
                return -EINVAL;
        }

        entry->interval = interval;

        return 0;
}

static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry,
                             int index, struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
        int err;

        err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
                                          entry_policy, NULL);
        if (err < 0) {
                NL_SET_ERR_MSG(extack, "Could not parse nested entry");
                return -EINVAL;
        }

        entry->index = index;

        return fill_sched_entry(tb, entry, extack);
}

static int parse_sched_list(struct nlattr *list,
                            struct sched_gate_list *sched,
                            struct netlink_ext_ack *extack)
{
        struct nlattr *n;
        int err, rem;
        int i = 0;

        if (!list)
                return -EINVAL;

        nla_for_each_nested(n, list, rem) {
                struct sched_entry *entry;

                if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
                        NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
                        continue;
                }

                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry) {
                        NL_SET_ERR_MSG(extack, "Not enough memory for entry");
                        return -ENOMEM;
                }

                err = parse_sched_entry(n, entry, i, extack);
                if (err < 0) {
                        kfree(entry);
                        return err;
                }

                list_add_tail(&entry->list, &sched->entries);
                i++;
        }

        sched->num_entries = i;

        return i;
}

static int parse_taprio_schedule(struct nlattr **tb,
                                 struct sched_gate_list *new,
                                 struct netlink_ext_ack *extack)
{
        int err = 0;

        if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
                NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
                return -ENOTSUPP;
        }

        if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
                new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

        if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
                new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);

        if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
                new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);

        if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
                err = parse_sched_list(
                        tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], new, extack);
        if (err < 0)
                return err;

        if (!new->cycle_time) {
                struct sched_entry *entry;
                ktime_t cycle = 0;

                /* If no cycle_time was given, default to the sum of all
                 * the intervals.
                 */
                list_for_each_entry(entry, &new->entries, list)
                        cycle = ktime_add_ns(cycle, entry->interval);
                new->cycle_time = cycle;
        }

        return 0;
}

static int taprio_parse_mqprio_opt(struct net_device *dev,
                                   struct tc_mqprio_qopt *qopt,
                                   struct netlink_ext_ack *extack,
                                   u32 taprio_flags)
{
        int i, j;

        if (!qopt && !dev->num_tc) {
                NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
                return -EINVAL;
        }

        /* If num_tc is already set, it means that the user already
         * configured the mqprio part
         */
        if (dev->num_tc)
                return 0;

        /* Verify num_tc is not out of max range */
        if (qopt->num_tc > TC_MAX_QUEUE) {
                NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
                return -EINVAL;
        }

        /* taprio imposes that traffic classes map 1:n to tx queues */
        if (qopt->num_tc > dev->num_tx_queues) {
                NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
                return -EINVAL;
        }

        /* Verify priority mapping uses valid tcs */
        for (i = 0; i < TC_BITMASK + 1; i++) {
                if (qopt->prio_tc_map[i] >= qopt->num_tc) {
                        NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
                        return -EINVAL;
                }
        }

        for (i = 0; i < qopt->num_tc; i++) {
                unsigned int last = qopt->offset[i] + qopt->count[i];

                /* Verify the queue count is within the tx range; a
                 * count equal to real_num_tx_queues means the last
                 * queue is in use.
                 */
                if (qopt->offset[i] >= dev->num_tx_queues ||
                    !qopt->count[i] ||
                    last > dev->real_num_tx_queues) {
                        NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
                        return -EINVAL;
                }

                if (TXTIME_ASSIST_IS_ENABLED(taprio_flags))
                        continue;

                /* Verify that the offset and counts do not overlap */
                for (j = i + 1; j < qopt->num_tc; j++) {
                        if (last > qopt->offset[j]) {
                                NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
                                return -EINVAL;
                        }
                }
        }

        return 0;
}

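/* Compute the first instant the schedule can start: base_time itself if
 * it is still in the future, otherwise the beginning of the next cycle.
 */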
static int taprio_get_start_time(struct Qdisc *sch,
                                 struct sched_gate_list *sched,
                                 ktime_t *start)
{
        struct taprio_sched *q = qdisc_priv(sch);
        ktime_t now, base, cycle;
        s64 n;

        base = sched_base_time(sched);
        now = taprio_get_time(q);

        if (ktime_after(base, now)) {
                *start = base;
                return 0;
        }

        cycle = sched->cycle_time;

        /* The qdisc is expected to have at least one sched_entry.  Moreover,
         * any entry must have 'interval' > 0. Thus if the cycle time is zero,
         * something went really wrong. In that case, we should warn about this
         * inconsistent state and return error.
         */
        if (WARN_ON(!cycle))
                return -EFAULT;

        /* Schedule the start time for the beginning of the next
         * cycle.
         */
        n = div64_s64(ktime_sub_ns(now, base), cycle);
        *start = ktime_add_ns(base, (n + 1) * cycle);
        return 0;
}

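/* Pre-compute the close_time and budget of the schedule's first entry,
 * so that the first run of advance_sched() finds a consistent state.
 */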
static void setup_first_close_time(struct taprio_sched *q,
                                   struct sched_gate_list *sched, ktime_t base)
{
        struct sched_entry *first;
        ktime_t cycle;

        first = list_first_entry(&sched->entries,
                                 struct sched_entry, list);

        cycle = sched->cycle_time;

        /* FIXME: find a better place to do this */
        sched->cycle_close_time = ktime_add_ns(base, cycle);

        first->close_time = ktime_add_ns(base, first->interval);
        taprio_set_budget(q, first);
        rcu_assign_pointer(q->current_entry, NULL);
}

static void taprio_start_sched(struct Qdisc *sch,
                               ktime_t start, struct sched_gate_list *new)
{
        struct taprio_sched *q = qdisc_priv(sch);
        ktime_t expires;

        expires = hrtimer_get_expires(&q->advance_timer);
        if (expires == 0)
                expires = KTIME_MAX;

        /* If the new schedule starts before the next expiration, we
         * reprogram the timer to the earlier of the two, so the switch
         * from the admin schedule to the operational one happens at
         * the right time.
         */
        start = min_t(ktime_t, start, expires);

        hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}

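/* Cache the link speed as the transmit duration of one byte, in
 * picoseconds; -1 means the speed is unknown, in which case dequeue()
 * refuses to run until a link event provides one.
 */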
static void taprio_set_picos_per_byte(struct net_device *dev,
                                      struct taprio_sched *q)
{
        struct ethtool_link_ksettings ecmd;
        int picos_per_byte = -1;

        if (!__ethtool_get_link_ksettings(dev, &ecmd) &&
            ecmd.base.speed != SPEED_UNKNOWN)
                picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
                                           ecmd.base.speed * 1000 * 1000);

        atomic64_set(&q->picos_per_byte, picos_per_byte);
        netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
                   dev->name, (long long)atomic64_read(&q->picos_per_byte),
                   ecmd.base.speed);
}

static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
                               void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct net_device *qdev;
        struct taprio_sched *q;
        bool found = false;

        ASSERT_RTNL();

        if (event != NETDEV_UP && event != NETDEV_CHANGE)
                return NOTIFY_DONE;

        spin_lock(&taprio_list_lock);
        list_for_each_entry(q, &taprio_list, taprio_list) {
                qdev = qdisc_dev(q->root);
                if (qdev == dev) {
                        found = true;
                        break;
                }
        }
        spin_unlock(&taprio_list_lock);

        if (found)
                taprio_set_picos_per_byte(dev, q);

        return NOTIFY_DONE;
}

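/* Initialize each entry's next_txtime to its first occurrence after
 * @base, i.e. @base plus the sum of all preceding intervals.
 */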
static void setup_txtime(struct taprio_sched *q,
                         struct sched_gate_list *sched, ktime_t base)
{
        struct sched_entry *entry;
        u32 interval = 0;

        list_for_each_entry(entry, &sched->entries, list) {
                entry->next_txtime = ktime_add_ns(base, interval);
                interval += entry->interval;
        }
}

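/* Parse and validate a new schedule from netlink and install it as the
 * admin schedule, to take over at its computed start time (in
 * txtime-assist mode it becomes the operational schedule directly when
 * none is running yet).
 */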
static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
                         struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
        struct sched_gate_list *oper, *admin, *new_admin;
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct tc_mqprio_qopt *mqprio = NULL;
        u32 taprio_flags = 0;
        int i, err, clockid;
        unsigned long flags;
        ktime_t start;

        err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
                                          taprio_policy, extack);
        if (err < 0)
                return err;

        if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
                mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);

        if (tb[TCA_TAPRIO_ATTR_FLAGS]) {
                taprio_flags = nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]);

                if (q->flags != 0 && q->flags != taprio_flags) {
                        NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
                        return -EOPNOTSUPP;
                } else if (!FLAGS_VALID(taprio_flags)) {
                        NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
                        return -EINVAL;
                }

                q->flags = taprio_flags;
        }

        err = taprio_parse_mqprio_opt(dev, mqprio, extack, taprio_flags);
        if (err < 0)
                return err;

        new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
        if (!new_admin) {
                NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&new_admin->entries);

        rcu_read_lock();
        oper = rcu_dereference(q->oper_sched);
        admin = rcu_dereference(q->admin_sched);
        rcu_read_unlock();

        if (mqprio && (oper || admin)) {
                NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
                err = -ENOTSUPP;
                goto free_sched;
        }

        err = parse_taprio_schedule(tb, new_admin, extack);
        if (err < 0)
                goto free_sched;

        if (new_admin->num_entries == 0) {
                NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
                err = -EINVAL;
                goto free_sched;
        }

        if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
                clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);

                /* We only support static clockids and we don't allow
                 * the clockid to be modified after the first init.
                 */
                if (clockid < 0 ||
                    (q->clockid != -1 && q->clockid != clockid)) {
                        NL_SET_ERR_MSG(extack, "Changing the 'clockid' of a running schedule is not supported");
                        err = -ENOTSUPP;
                        goto free_sched;
                }

                q->clockid = clockid;
        }

        if (q->clockid == -1 && !tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
                NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
                err = -EINVAL;
                goto free_sched;
        }

        taprio_set_picos_per_byte(dev, q);

        /* Protects against enqueue()/dequeue() */
        spin_lock_bh(qdisc_lock(sch));

        if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
                if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
                        NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
                        err = -EINVAL;
                        goto unlock;
                }

                q->txtime_delay = nla_get_s32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
        }

        if (!TXTIME_ASSIST_IS_ENABLED(taprio_flags) &&
            !hrtimer_active(&q->advance_timer)) {
                hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
                q->advance_timer.function = advance_sched;
        }

        if (mqprio) {
                netdev_set_num_tc(dev, mqprio->num_tc);
                for (i = 0; i < mqprio->num_tc; i++)
                        netdev_set_tc_queue(dev, i,
                                            mqprio->count[i],
                                            mqprio->offset[i]);

                /* Always use supplied priority mappings */
                for (i = 0; i < TC_BITMASK + 1; i++)
                        netdev_set_prio_tc_map(dev, i,
                                               mqprio->prio_tc_map[i]);
        }

        switch (q->clockid) {
        case CLOCK_REALTIME:
                q->tk_offset = TK_OFFS_REAL;
                break;
        case CLOCK_MONOTONIC:
                q->tk_offset = TK_OFFS_MAX;
                break;
        case CLOCK_BOOTTIME:
                q->tk_offset = TK_OFFS_BOOT;
                break;
        case CLOCK_TAI:
                q->tk_offset = TK_OFFS_TAI;
                break;
        default:
                NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
                err = -EINVAL;
                goto unlock;
        }

        err = taprio_get_start_time(sch, new_admin, &start);
        if (err < 0) {
                NL_SET_ERR_MSG(extack, "Internal error: failed to get start time");
                goto unlock;
        }

        if (TXTIME_ASSIST_IS_ENABLED(taprio_flags)) {
                setup_txtime(q, new_admin, start);

                if (!oper) {
                        rcu_assign_pointer(q->oper_sched, new_admin);
                        err = 0;
                        new_admin = NULL;
                        goto unlock;
                }

                rcu_assign_pointer(q->admin_sched, new_admin);
                if (admin)
                        call_rcu(&admin->rcu, taprio_free_sched_cb);
        } else {
                setup_first_close_time(q, new_admin, start);

                /* Protects against advance_sched() */
                spin_lock_irqsave(&q->current_entry_lock, flags);

                taprio_start_sched(sch, start, new_admin);

                rcu_assign_pointer(q->admin_sched, new_admin);
                if (admin)
                        call_rcu(&admin->rcu, taprio_free_sched_cb);

                spin_unlock_irqrestore(&q->current_entry_lock, flags);
        }

        new_admin = NULL;
        err = 0;

unlock:
        spin_unlock_bh(qdisc_lock(sch));

free_sched:
        kfree(new_admin);

        return err;
}

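/* Tear the qdisc down: cancel the timer, release the per-queue child
 * qdiscs and free both schedules after an RCU grace period.
 */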
static void taprio_destroy(struct Qdisc *sch)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        unsigned int i;

        spin_lock(&taprio_list_lock);
        list_del(&q->taprio_list);
        spin_unlock(&taprio_list_lock);

        hrtimer_cancel(&q->advance_timer);

        if (q->qdiscs) {
                for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
                        qdisc_put(q->qdiscs[i]);

                kfree(q->qdiscs);
        }
        q->qdiscs = NULL;

        netdev_set_num_tc(dev, 0);

        if (q->oper_sched)
                call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);

        if (q->admin_sched)
                call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb);
}

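/* One child qdisc (pfifo by default) is created per TX queue; the
 * schedule itself is set up by the taprio_change() call at the end.
 */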
static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
                       struct netlink_ext_ack *extack)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        int i;

        spin_lock_init(&q->current_entry_lock);

        hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
        q->advance_timer.function = advance_sched;

        q->root = sch;

        /* We only support static clockids. Use an invalid value as default
         * and get the valid one from taprio_change().
         */
        q->clockid = -1;

        if (sch->parent != TC_H_ROOT)
                return -EOPNOTSUPP;

        if (!netif_is_multiqueue(dev))
                return -EOPNOTSUPP;

        /* pre-allocate qdiscs, attachment can't fail */
        q->qdiscs = kcalloc(dev->num_tx_queues,
                            sizeof(q->qdiscs[0]),
                            GFP_KERNEL);

        if (!q->qdiscs)
                return -ENOMEM;

        if (!opt)
                return -EINVAL;

        spin_lock(&taprio_list_lock);
        list_add(&q->taprio_list, &taprio_list);
        spin_unlock(&taprio_list_lock);

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *dev_queue;
                struct Qdisc *qdisc;

                dev_queue = netdev_get_tx_queue(dev, i);
                qdisc = qdisc_create_dflt(dev_queue,
                                          &pfifo_qdisc_ops,
                                          TC_H_MAKE(TC_H_MAJ(sch->handle),
                                                    TC_H_MIN(i + 1)),
                                          extack);
                if (!qdisc)
                        return -ENOMEM;

                if (i < dev->real_num_tx_queues)
                        qdisc_hash_add(qdisc, false);

                q->qdiscs[i] = qdisc;
        }

        return taprio_change(sch, opt, extack);
}

static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
                                             unsigned long cl)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned long ntx = cl - 1;

        if (ntx >= dev->num_tx_queues)
                return NULL;

        return netdev_get_tx_queue(dev, ntx);
}

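/* Classful API: replace the child qdisc of the TX queue identified by
 * @cl, deactivating the device around the swap.
 */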
static int taprio_graft(struct Qdisc *sch, unsigned long cl,
                        struct Qdisc *new, struct Qdisc **old,
                        struct netlink_ext_ack *extack)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

        if (!dev_queue)
                return -EINVAL;

        if (dev->flags & IFF_UP)
                dev_deactivate(dev);

        *old = q->qdiscs[cl - 1];
        q->qdiscs[cl - 1] = new;

        if (new)
                new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

        if (dev->flags & IFF_UP)
                dev_activate(dev);

        return 0;
}

static int dump_entry(struct sk_buff *msg,
                      const struct sched_entry *entry)
{
        struct nlattr *item;

        item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
        if (!item)
                return -ENOSPC;

        if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
                goto nla_put_failure;

        if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
                goto nla_put_failure;

        if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
                        entry->gate_mask))
                goto nla_put_failure;

        if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
                        entry->interval))
                goto nla_put_failure;

        return nla_nest_end(msg, item);

nla_put_failure:
        nla_nest_cancel(msg, item);
        return -1;
}

static int dump_schedule(struct sk_buff *msg,
                         const struct sched_gate_list *root)
{
        struct nlattr *entry_list;
        struct sched_entry *entry;

        if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
                        root->base_time, TCA_TAPRIO_PAD))
                return -1;

        if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
                        root->cycle_time, TCA_TAPRIO_PAD))
                return -1;

        if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
                        root->cycle_time_extension, TCA_TAPRIO_PAD))
                return -1;

        entry_list = nla_nest_start_noflag(msg,
                                           TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
        if (!entry_list)
                goto error_nest;

        list_for_each_entry(entry, &root->entries, list) {
                if (dump_entry(msg, entry) < 0)
                        goto error_nest;
        }

        nla_nest_end(msg, entry_list);
        return 0;

error_nest:
        nla_nest_cancel(msg, entry_list);
        return -1;
}

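/* Dump the priomap, clockid, flags and both the operational and admin
 * schedules back to user space.
 */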
static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct sched_gate_list *oper, *admin;
        struct tc_mqprio_qopt opt = { 0 };
        struct nlattr *nest, *sched_nest;
        unsigned int i;

        rcu_read_lock();
        oper = rcu_dereference(q->oper_sched);
        admin = rcu_dereference(q->admin_sched);

        opt.num_tc = netdev_get_num_tc(dev);
        memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));

        for (i = 0; i < netdev_get_num_tc(dev); i++) {
                opt.count[i] = dev->tc_to_txq[i].count;
                opt.offset[i] = dev->tc_to_txq[i].offset;
        }

        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (!nest)
                goto start_error;

        if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
                goto options_error;

        if (nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
                goto options_error;

        if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
                goto options_error;

        if (q->txtime_delay &&
            nla_put_s32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
                goto options_error;

        if (oper && dump_schedule(skb, oper))
                goto options_error;

        if (!admin)
                goto done;

        sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
        if (!sched_nest)
                goto options_error;

        if (dump_schedule(skb, admin))
                goto admin_error;

        nla_nest_end(skb, sched_nest);

done:
        rcu_read_unlock();

        return nla_nest_end(skb, nest);

admin_error:
        nla_nest_cancel(skb, sched_nest);

options_error:
        nla_nest_cancel(skb, nest);

start_error:
        rcu_read_unlock();
        return -ENOSPC;
}

static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
        struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

        if (!dev_queue)
                return NULL;

        return dev_queue->qdisc_sleeping;
}

static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
{
        unsigned int ntx = TC_H_MIN(classid);

        if (!taprio_queue_get(sch, ntx))
                return 0;
        return ntx;
}

static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
                             struct sk_buff *skb, struct tcmsg *tcm)
{
        struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

        tcm->tcm_parent = TC_H_ROOT;
        tcm->tcm_handle |= TC_H_MIN(cl);
        tcm->tcm_info = dev_queue->qdisc_sleeping->handle;

        return 0;
}

static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                   struct gnet_dump *d)
        __releases(d->lock)
        __acquires(d->lock)
{
        struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

        sch = dev_queue->qdisc_sleeping;
        if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
            qdisc_qstats_copy(d, sch) < 0)
                return -1;
        return 0;
}

static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned long ntx;

        if (arg->stop)
                return;

        arg->count = arg->skip;
        for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
                if (arg->fn(sch, ntx + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
                arg->count++;
        }
}

static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
                                                struct tcmsg *tcm)
{
        return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops taprio_class_ops = {
        .graft          = taprio_graft,
        .leaf           = taprio_leaf,
        .find           = taprio_find,
        .walk           = taprio_walk,
        .dump           = taprio_dump_class,
        .dump_stats     = taprio_dump_class_stats,
        .select_queue   = taprio_select_queue,
};

static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
        .cl_ops         = &taprio_class_ops,
        .id             = "taprio",
        .priv_size      = sizeof(struct taprio_sched),
        .init           = taprio_init,
        .change         = taprio_change,
        .destroy        = taprio_destroy,
        .peek           = taprio_peek,
        .dequeue        = taprio_dequeue,
        .enqueue        = taprio_enqueue,
        .dump           = taprio_dump,
        .owner          = THIS_MODULE,
};

static struct notifier_block taprio_device_notifier = {
        .notifier_call = taprio_dev_notifier,
};

static int __init taprio_module_init(void)
{
        int err = register_netdevice_notifier(&taprio_device_notifier);

        if (err)
                return err;

        return register_qdisc(&taprio_qdisc_ops);
}

static void __exit taprio_module_exit(void)
{
        unregister_qdisc(&taprio_qdisc_ops);
        unregister_netdevice_notifier(&taprio_device_notifier);
}

module_init(taprio_module_init);
module_exit(taprio_module_exit);
MODULE_LICENSE("GPL");