1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 /* COMMON Applications Kept Enhanced (CAKE) discipline
5 * Copyright (C) 2014-2018 Jonathan Morton <chromatix99@gmail.com>
6 * Copyright (C) 2015-2018 Toke Høiland-Jørgensen <toke@toke.dk>
7 * Copyright (C) 2014-2018 Dave Täht <dave.taht@gmail.com>
8 * Copyright (C) 2015-2018 Sebastian Moeller <moeller0@gmx.de>
9 * (C) 2015-2018 Kevin Darbyshire-Bryant <kevin@darbyshire-bryant.me.uk>
10 * Copyright (C) 2017-2018 Ryan Mounce <ryan@mounce.com.au>
12 * The CAKE Principles:
13 * (or, how to have your cake and eat it too)
15 * This is a combination of several shaping, AQM and FQ techniques into one
16 * easy-to-use package:
18 * - An overall bandwidth shaper, to move the bottleneck away from dumb CPE
19 * equipment and bloated MACs. This operates in deficit mode (as in sch_fq),
20 * eliminating the need for any sort of burst parameter (eg. token bucket
21 * depth). Burst support is limited to that necessary to overcome scheduling
24 * - A Diffserv-aware priority queue, giving more priority to certain classes,
25 * up to a specified fraction of bandwidth. Above that bandwidth threshold,
26 * the priority is reduced to avoid starving other tins.
28 * - Each priority tin has a separate Flow Queue system, to isolate traffic
29 * flows from each other. This prevents a burst on one flow from increasing
30 * the delay to another. Flows are distributed to queues using a
31 * set-associative hash function.
33 * - Each queue is actively managed by Cobalt, which is a combination of the
34 * Codel and Blue AQM algorithms. This serves flows fairly, and signals
35 * congestion early via ECN (if available) and/or packet drops, to keep
36 * latency low. The codel parameters are auto-tuned based on the bandwidth
37 * setting, as is necessary at low bandwidths.
39 * The configuration parameters are kept deliberately simple for ease of use.
40 * Everything has sane defaults. Complete generality of configuration is *not*
43 * The priority queue operates according to a weighted DRR scheme, combined with
44 * a bandwidth tracker which reuses the shaper logic to detect which side of the
45 * bandwidth sharing threshold the tin is operating at. This determines whether a
46 * priority-based weight (high) or a bandwidth-based weight (low) is used for
47 * that tin in the current pass.
49 * This qdisc was inspired by Eric Dumazet's fq_codel code, which he kindly
50 * granted us permission to leverage.
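/* Illustrative usage only (userspace side, not part of this file): with the
 * standard tc-cake(8) options from iproute2, a typical egress deployment on a
 * 20 Mbit/s uplink might be configured as
 *
 *	tc qdisc replace dev eth0 root cake bandwidth 20Mbit diffserv4 nat
 *
 * which enables the shaper, the four-tin Diffserv mapping and NAT-aware
 * flow/host hashing implemented below.  Option names may vary with the
 * iproute2 version in use.
 */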
53 #include <linux/module.h>
54 #include <linux/types.h>
55 #include <linux/kernel.h>
56 #include <linux/jiffies.h>
57 #include <linux/string.h>
59 #include <linux/errno.h>
60 #include <linux/init.h>
61 #include <linux/skbuff.h>
62 #include <linux/jhash.h>
63 #include <linux/slab.h>
64 #include <linux/vmalloc.h>
65 #include <linux/reciprocal_div.h>
66 #include <net/netlink.h>
67 #include <linux/if_vlan.h>
68 #include <net/pkt_sched.h>
69 #include <net/pkt_cls.h>
71 #include <net/flow_dissector.h>
73 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
74 #include <net/netfilter/nf_conntrack_core.h>
77 #define CAKE_SET_WAYS (8)
78 #define CAKE_MAX_TINS (8)
79 #define CAKE_QUEUES (1024)
80 #define CAKE_FLOW_MASK 63
81 #define CAKE_FLOW_NAT_FLAG 64
83 /* struct cobalt_params - contains codel and blue parameters
84 * @interval: codel initial drop rate
85 * @target: maximum persistent sojourn time & blue update rate
86 * @mtu_time: serialisation delay of maximum-size packet
87 * @p_inc: increment of blue drop probability (0.32 fxp)
88 * @p_dec: decrement of blue drop probability (0.32 fxp)
90 struct cobalt_params {
98 /* struct cobalt_vars - contains codel and blue variables
99 * @count: codel dropping frequency
100 * @rec_inv_sqrt: reciprocal value of sqrt(count) >> 1
101 * @drop_next: time to drop next packet, or when we dropped last
102 * @blue_timer: Blue time to next drop
103 * @p_drop: BLUE drop probability (0.32 fxp)
104 * @dropping: set if in dropping state
105 * @ecn_marked: set if marked
120 CAKE_SET_SPARSE_WAIT, /* counted in SPARSE, actually in BULK */
126 /* this stuff is all needed per-flow at dequeue time */
127 struct sk_buff *head;
128 struct sk_buff *tail;
129 struct list_head flowchain;
132 struct cobalt_vars cvars;
133 u16 srchost; /* index into cake_host table */
136 }; /* please try to keep this structure <= 64 bytes */
141 u16 srchost_bulk_flow_count;
142 u16 dsthost_bulk_flow_count;
145 struct cake_heap_entry {
149 struct cake_tin_data {
150 struct cake_flow flows[CAKE_QUEUES];
151 u32 backlogs[CAKE_QUEUES];
152 u32 tags[CAKE_QUEUES]; /* for set association */
153 u16 overflow_idx[CAKE_QUEUES];
154 struct cake_host hosts[CAKE_QUEUES]; /* for triple isolation */
157 struct cobalt_params cparams;
160 u16 sparse_flow_count;
161 u16 decaying_flow_count;
162 u16 unresponsive_flow_count;
166 struct list_head new_flows;
167 struct list_head old_flows;
168 struct list_head decaying_flows;
170 /* time_next = time_this + ((len * rate_ns) >> rate_shft) */
171 ktime_t time_next_packet;
187 /* moving averages */
192 /* hash function stats */
197 }; /* number of tins is small, so size of this struct doesn't matter much */
199 struct cake_sched_data {
200 struct tcf_proto __rcu *filter_list; /* optional external classifier */
201 struct tcf_block *block;
202 struct cake_tin_data *tins;
204 struct cake_heap_entry overflow_heap[CAKE_QUEUES * CAKE_MAX_TINS];
205 u16 overflow_timeout;
216 /* time_next = time_this + ((len * rate_ns) >> rate_shft) */
218 ktime_t time_next_packet;
219 ktime_t failsafe_next_packet;
228 /* resource tracking */
232 u32 buffer_config_limit;
234 /* indices for dequeue */
238 struct qdisc_watchdog watchdog;
242 /* bandwidth capacity estimate */
243 ktime_t last_packet_time;
244 ktime_t avg_window_begin;
245 u64 avg_packet_interval;
246 u64 avg_window_bytes;
247 u64 avg_peak_bandwidth;
248 ktime_t last_reconfig_time;
250 /* packet length stats */
259 CAKE_FLAG_OVERHEAD = BIT(0),
260 CAKE_FLAG_AUTORATE_INGRESS = BIT(1),
261 CAKE_FLAG_INGRESS = BIT(2),
262 CAKE_FLAG_WASH = BIT(3),
263 CAKE_FLAG_SPLIT_GSO = BIT(4)
266 /* COBALT operates the Codel and BLUE algorithms in parallel, in order to
267 * obtain the best features of each. Codel is excellent on flows which
268 * respond to congestion signals in a TCP-like way. BLUE is more effective on
269 * unresponsive flows.
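/* In outline (a summary of cobalt_should_drop() and friends below, not extra
 * logic): while a packet's sojourn time persistently exceeds the target,
 * Codel-style marks/drops are scheduled roughly every interval/sqrt(count);
 * a queue overflow bumps the BLUE drop probability p_drop (0.32 fixed point)
 * and an empty queue decays it; while p_drop is non-zero, dequeued packets
 * are additionally dropped with that probability.
 */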
272 struct cobalt_skb_cb {
273 ktime_t enqueue_time;
277 static u64 us_to_ns(u64 us)
279 return us * NSEC_PER_USEC;
282 static struct cobalt_skb_cb *get_cobalt_cb(const struct sk_buff *skb)
284 qdisc_cb_private_validate(skb, sizeof(struct cobalt_skb_cb));
285 return (struct cobalt_skb_cb *)qdisc_skb_cb(skb)->data;
288 static ktime_t cobalt_get_enqueue_time(const struct sk_buff *skb)
290 return get_cobalt_cb(skb)->enqueue_time;
293 static void cobalt_set_enqueue_time(struct sk_buff *skb,
296 get_cobalt_cb(skb)->enqueue_time = now;
299 static u16 quantum_div[CAKE_QUEUES + 1] = {0};
301 /* Diffserv lookup tables */
303 static const u8 precedence[] = {
304 0, 0, 0, 0, 0, 0, 0, 0,
305 1, 1, 1, 1, 1, 1, 1, 1,
306 2, 2, 2, 2, 2, 2, 2, 2,
307 3, 3, 3, 3, 3, 3, 3, 3,
308 4, 4, 4, 4, 4, 4, 4, 4,
309 5, 5, 5, 5, 5, 5, 5, 5,
310 6, 6, 6, 6, 6, 6, 6, 6,
311 7, 7, 7, 7, 7, 7, 7, 7,
314 static const u8 diffserv8[] = {
315 2, 5, 1, 2, 4, 2, 2, 2,
316 0, 2, 1, 2, 1, 2, 1, 2,
317 5, 2, 4, 2, 4, 2, 4, 2,
318 3, 2, 3, 2, 3, 2, 3, 2,
319 6, 2, 3, 2, 3, 2, 3, 2,
320 6, 2, 2, 2, 6, 2, 6, 2,
321 7, 2, 2, 2, 2, 2, 2, 2,
322 7, 2, 2, 2, 2, 2, 2, 2,
325 static const u8 diffserv4[] = {
326 0, 2, 0, 0, 2, 0, 0, 0,
327 1, 0, 0, 0, 0, 0, 0, 0,
328 2, 0, 2, 0, 2, 0, 2, 0,
329 2, 0, 2, 0, 2, 0, 2, 0,
330 3, 0, 2, 0, 2, 0, 2, 0,
331 3, 0, 0, 0, 3, 0, 3, 0,
332 3, 0, 0, 0, 0, 0, 0, 0,
333 3, 0, 0, 0, 0, 0, 0, 0,
336 static const u8 diffserv3[] = {
337 0, 0, 0, 0, 2, 0, 0, 0,
338 1, 0, 0, 0, 0, 0, 0, 0,
339 0, 0, 0, 0, 0, 0, 0, 0,
340 0, 0, 0, 0, 0, 0, 0, 0,
341 0, 0, 0, 0, 0, 0, 0, 0,
342 0, 0, 0, 0, 2, 0, 2, 0,
343 2, 0, 0, 0, 0, 0, 0, 0,
344 2, 0, 0, 0, 0, 0, 0, 0,
347 static const u8 besteffort[] = {
348 0, 0, 0, 0, 0, 0, 0, 0,
349 0, 0, 0, 0, 0, 0, 0, 0,
350 0, 0, 0, 0, 0, 0, 0, 0,
351 0, 0, 0, 0, 0, 0, 0, 0,
352 0, 0, 0, 0, 0, 0, 0, 0,
353 0, 0, 0, 0, 0, 0, 0, 0,
354 0, 0, 0, 0, 0, 0, 0, 0,
355 0, 0, 0, 0, 0, 0, 0, 0,
358 /* tin priority order for stats dumping */
360 static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
361 static const u8 bulk_order[] = {1, 0, 2, 3};
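/* Example lookups into the tables above (indexed by DSCP): with diffserv4
 * selected, EF (DSCP 46) maps to tin 3 and CS1 (DSCP 8) maps to tin 1, while
 * codepoints shown as 0 fall into the first (best effort) tin.
 */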
363 #define REC_INV_SQRT_CACHE (16)
364 static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};
366 /* http://en.wikipedia.org/wiki/Methods_of_computing_square_roots
367 * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
369 * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
372 static void cobalt_newton_step(struct cobalt_vars *vars)
374 u32 invsqrt, invsqrt2;
377 invsqrt = vars->rec_inv_sqrt;
378 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
379 val = (3LL << 32) - ((u64)vars->count * invsqrt2);
381 val >>= 2; /* avoid overflow in following multiply */
382 val = (val * invsqrt) >> (32 - 2 + 1);
384 vars->rec_inv_sqrt = val;
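/* Worked example of the formula above: for count == 4 and a correct guess
 * invsqrt == 0.5 (0x80000000 in Q0.32), invsqrt^2 == 0.25, so
 * 3 - 4 * 0.25 == 2 and (0.5 / 2) * 2 == 0.5; a correct estimate is a fixed
 * point of the iteration.  cobalt_control() below then computes
 * reciprocal_scale(interval, rec_inv_sqrt) ~= interval / sqrt(count).
 */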
387 static void cobalt_invsqrt(struct cobalt_vars *vars)
389 if (vars->count < REC_INV_SQRT_CACHE)
390 vars->rec_inv_sqrt = cobalt_rec_inv_sqrt_cache[vars->count];
392 cobalt_newton_step(vars);
395 /* There is a big difference in timing between the accurate values placed in
396 * the cache and the approximations given by a single Newton step for small
397 * count values, particularly when stepping from count 1 to 2 or vice versa.
398 * Above 16, a single Newton step gives sufficient accuracy in either
399 * direction, given the precision stored.
401 * The magnitude of the error when stepping up to count 2 is such as to give
402 * the value that *should* have been produced at count 4.
405 static void cobalt_cache_init(void)
407 struct cobalt_vars v;
409 memset(&v, 0, sizeof(v));
410 v.rec_inv_sqrt = ~0U;
411 cobalt_rec_inv_sqrt_cache[0] = v.rec_inv_sqrt;
413 for (v.count = 1; v.count < REC_INV_SQRT_CACHE; v.count++) {
414 cobalt_newton_step(&v);
415 cobalt_newton_step(&v);
416 cobalt_newton_step(&v);
417 cobalt_newton_step(&v);
419 cobalt_rec_inv_sqrt_cache[v.count] = v.rec_inv_sqrt;
423 static void cobalt_vars_init(struct cobalt_vars *vars)
425 memset(vars, 0, sizeof(*vars));
427 if (!cobalt_rec_inv_sqrt_cache[0]) {
429 cobalt_rec_inv_sqrt_cache[0] = ~0;
433 /* CoDel control_law is t + interval/sqrt(count)
434 * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
435 * both sqrt() and divide operation.
437 static ktime_t cobalt_control(ktime_t t,
441 return ktime_add_ns(t, reciprocal_scale(interval,
445 /* Call this when a packet had to be dropped due to queue overflow. Returns
446 * true if the BLUE state was quiescent before but active after this call.
448 static bool cobalt_queue_full(struct cobalt_vars *vars,
449 struct cobalt_params *p,
454 if (ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
456 vars->p_drop += p->p_inc;
457 if (vars->p_drop < p->p_inc)
459 vars->blue_timer = now;
461 vars->dropping = true;
462 vars->drop_next = now;
469 /* Call this when the queue was serviced but turned out to be empty. Returns
470 * true if the BLUE state was active before but quiescent after this call.
472 static bool cobalt_queue_empty(struct cobalt_vars *vars,
473 struct cobalt_params *p,
479 ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
480 if (vars->p_drop < p->p_dec)
483 vars->p_drop -= p->p_dec;
484 vars->blue_timer = now;
485 down = !vars->p_drop;
487 vars->dropping = false;
489 if (vars->count && ktime_to_ns(ktime_sub(now, vars->drop_next)) >= 0) {
491 cobalt_invsqrt(vars);
492 vars->drop_next = cobalt_control(vars->drop_next,
500 /* Call this with a freshly dequeued packet for possible congestion marking.
501 * Returns true as an instruction to drop the packet, false for delivery.
503 static bool cobalt_should_drop(struct cobalt_vars *vars,
504 struct cobalt_params *p,
509 bool next_due, over_target, drop = false;
513 /* The 'schedule' variable records, in its sign, whether 'now' is before or
514 * after 'drop_next'. This allows 'drop_next' to be updated before the next
515 * scheduling decision is actually branched, without destroying that
516 * information. Similarly, the first 'schedule' value calculated is preserved
517 * in the boolean 'next_due'.
519 * As for 'drop_next', we take advantage of the fact that 'interval' is both
520 * the delay between first exceeding 'target' and the first signalling event,
521 * *and* the scaling factor for the signalling frequency. It's therefore very
522 * natural to use a single mechanism for both purposes, and eliminates a
523 * significant amount of reference Codel's spaghetti code. To help with this,
524 * both the '0' and '1' entries in the invsqrt cache are 0xFFFFFFFF, as close
525 * as possible to 1.0 in fixed-point.
528 sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
529 schedule = ktime_sub(now, vars->drop_next);
530 over_target = sojourn > p->target &&
531 sojourn > p->mtu_time * bulk_flows * 2 &&
532 sojourn > p->mtu_time * 4;
533 next_due = vars->count && ktime_to_ns(schedule) >= 0;
535 vars->ecn_marked = false;
538 if (!vars->dropping) {
539 vars->dropping = true;
540 vars->drop_next = cobalt_control(now,
546 } else if (vars->dropping) {
547 vars->dropping = false;
550 if (next_due && vars->dropping) {
551 /* Use ECN mark if possible, otherwise drop */
552 drop = !(vars->ecn_marked = INET_ECN_set_ce(skb));
557 cobalt_invsqrt(vars);
558 vars->drop_next = cobalt_control(vars->drop_next,
561 schedule = ktime_sub(now, vars->drop_next);
565 cobalt_invsqrt(vars);
566 vars->drop_next = cobalt_control(vars->drop_next,
569 schedule = ktime_sub(now, vars->drop_next);
570 next_due = vars->count && ktime_to_ns(schedule) >= 0;
574 /* Simple BLUE implementation. Lack of ECN is deliberate. */
576 drop |= (prandom_u32() < vars->p_drop);
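/* The comparison above treats p_drop as a 0.32 fixed-point probability:
 * e.g. p_drop == 0x80000000 drops roughly half of the packets reaching
 * this point.
 */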
578 /* Overload the drop_next field as an activity timeout */
580 vars->drop_next = ktime_add_ns(now, p->interval);
581 else if (ktime_to_ns(schedule) > 0 && !drop)
582 vars->drop_next = now;
587 static bool cake_update_flowkeys(struct flow_keys *keys,
588 const struct sk_buff *skb)
590 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
591 struct nf_conntrack_tuple tuple = {};
592 bool rev = !skb->_nfct, upd = false;
595 if (tc_skb_protocol(skb) != htons(ETH_P_IP))
598 if (!nf_ct_get_tuple_skb(&tuple, skb))
601 ip = rev ? tuple.dst.u3.ip : tuple.src.u3.ip;
602 if (ip != keys->addrs.v4addrs.src) {
603 keys->addrs.v4addrs.src = ip;
606 ip = rev ? tuple.src.u3.ip : tuple.dst.u3.ip;
607 if (ip != keys->addrs.v4addrs.dst) {
608 keys->addrs.v4addrs.dst = ip;
612 if (keys->ports.ports) {
615 port = rev ? tuple.dst.u.all : tuple.src.u.all;
616 if (port != keys->ports.src) {
617 keys->ports.src = port;
620 port = rev ? tuple.src.u.all : tuple.dst.u.all;
621 if (port != keys->ports.dst) {
622 keys->ports.dst = port;
632 /* Cake has several subtle multiple bit settings. In these cases you
633 * would be matching triple isolate mode as well.
636 static bool cake_dsrc(int flow_mode)
638 return (flow_mode & CAKE_FLOW_DUAL_SRC) == CAKE_FLOW_DUAL_SRC;
641 static bool cake_ddst(int flow_mode)
643 return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
646 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
647 int flow_mode, u16 flow_override, u16 host_override)
649 bool hash_flows = (!flow_override && !!(flow_mode & CAKE_FLOW_FLOWS));
650 bool hash_hosts = (!host_override && !!(flow_mode & CAKE_FLOW_HOSTS));
651 bool nat_enabled = !!(flow_mode & CAKE_FLOW_NAT_FLAG);
652 u32 flow_hash = 0, srchost_hash = 0, dsthost_hash = 0;
653 u16 reduced_hash, srchost_idx, dsthost_idx;
654 struct flow_keys keys, host_keys;
655 bool use_skbhash = skb->l4_hash;
657 if (unlikely(flow_mode == CAKE_FLOW_NONE))
660 /* If both overrides are set, or we can use the SKB hash and nat mode is
661 * disabled, we can skip packet dissection entirely. If nat mode is
662 * enabled there's another check below after doing the conntrack lookup.
664 if ((!hash_flows || (use_skbhash && !nat_enabled)) && !hash_hosts)
667 skb_flow_dissect_flow_keys(skb, &keys,
668 FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
670 /* Don't use the SKB hash if we change the lookup keys from conntrack */
671 if (nat_enabled && cake_update_flowkeys(&keys, skb))
674 /* If we can still use the SKB hash and don't need the host hash, we can
675 * skip the rest of the hashing procedure
677 if (use_skbhash && !hash_hosts)
680 /* flow_hash_from_keys() sorts the addresses by value, so we have
681 * to preserve their order in a separate data structure to treat
682 * src and dst host addresses as independently selectable.
685 host_keys.ports.ports = 0;
686 host_keys.basic.ip_proto = 0;
687 host_keys.keyid.keyid = 0;
688 host_keys.tags.flow_label = 0;
690 switch (host_keys.control.addr_type) {
691 case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
692 host_keys.addrs.v4addrs.src = 0;
693 dsthost_hash = flow_hash_from_keys(&host_keys);
694 host_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
695 host_keys.addrs.v4addrs.dst = 0;
696 srchost_hash = flow_hash_from_keys(&host_keys);
699 case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
700 memset(&host_keys.addrs.v6addrs.src, 0,
701 sizeof(host_keys.addrs.v6addrs.src));
702 dsthost_hash = flow_hash_from_keys(&host_keys);
703 host_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
704 memset(&host_keys.addrs.v6addrs.dst, 0,
705 sizeof(host_keys.addrs.v6addrs.dst));
706 srchost_hash = flow_hash_from_keys(&host_keys);
714 /* This *must* be after the above switch, since as a
715 * side-effect it sorts the src and dst addresses.
717 if (hash_flows && !use_skbhash)
718 flow_hash = flow_hash_from_keys(&keys);
722 flow_hash = flow_override - 1;
723 else if (use_skbhash)
724 flow_hash = skb->hash;
726 dsthost_hash = host_override - 1;
727 srchost_hash = host_override - 1;
730 if (!(flow_mode & CAKE_FLOW_FLOWS)) {
731 if (flow_mode & CAKE_FLOW_SRC_IP)
732 flow_hash ^= srchost_hash;
734 if (flow_mode & CAKE_FLOW_DST_IP)
735 flow_hash ^= dsthost_hash;
738 reduced_hash = flow_hash % CAKE_QUEUES;
740 /* set-associative hashing */
741 /* fast path if no hash collision (direct lookup succeeds) */
742 if (likely(q->tags[reduced_hash] == flow_hash &&
743 q->flows[reduced_hash].set)) {
746 u32 inner_hash = reduced_hash % CAKE_SET_WAYS;
747 u32 outer_hash = reduced_hash - inner_hash;
748 bool allocate_src = false;
749 bool allocate_dst = false;
752 /* check if any active queue in the set is reserved for
755 for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
756 i++, k = (k + 1) % CAKE_SET_WAYS) {
757 if (q->tags[outer_hash + k] == flow_hash) {
761 if (!q->flows[outer_hash + k].set) {
762 /* need to increment host refcnts */
763 allocate_src = cake_dsrc(flow_mode);
764 allocate_dst = cake_ddst(flow_mode);
771 /* no queue is reserved for this flow, look for an
774 for (i = 0; i < CAKE_SET_WAYS;
775 i++, k = (k + 1) % CAKE_SET_WAYS) {
776 if (!q->flows[outer_hash + k].set) {
778 allocate_src = cake_dsrc(flow_mode);
779 allocate_dst = cake_ddst(flow_mode);
784 /* With no empty queues, default to the original
785 * queue, accept the collision, update the host tags.
788 if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
789 q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
790 q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
792 allocate_src = cake_dsrc(flow_mode);
793 allocate_dst = cake_ddst(flow_mode);
795 /* reserve queue for future packets in same flow */
796 reduced_hash = outer_hash + k;
797 q->tags[reduced_hash] = flow_hash;
800 srchost_idx = srchost_hash % CAKE_QUEUES;
801 inner_hash = srchost_idx % CAKE_SET_WAYS;
802 outer_hash = srchost_idx - inner_hash;
803 for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
804 i++, k = (k + 1) % CAKE_SET_WAYS) {
805 if (q->hosts[outer_hash + k].srchost_tag ==
809 for (i = 0; i < CAKE_SET_WAYS;
810 i++, k = (k + 1) % CAKE_SET_WAYS) {
811 if (!q->hosts[outer_hash + k].srchost_bulk_flow_count)
814 q->hosts[outer_hash + k].srchost_tag = srchost_hash;
816 srchost_idx = outer_hash + k;
817 if (q->flows[reduced_hash].set == CAKE_SET_BULK)
818 q->hosts[srchost_idx].srchost_bulk_flow_count++;
819 q->flows[reduced_hash].srchost = srchost_idx;
823 dsthost_idx = dsthost_hash % CAKE_QUEUES;
824 inner_hash = dsthost_idx % CAKE_SET_WAYS;
825 outer_hash = dsthost_idx - inner_hash;
826 for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
827 i++, k = (k + 1) % CAKE_SET_WAYS) {
828 if (q->hosts[outer_hash + k].dsthost_tag ==
832 for (i = 0; i < CAKE_SET_WAYS;
833 i++, k = (k + 1) % CAKE_SET_WAYS) {
834 if (!q->hosts[outer_hash + k].dsthost_bulk_flow_count)
837 q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
839 dsthost_idx = outer_hash + k;
840 if (q->flows[reduced_hash].set == CAKE_SET_BULK)
841 q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
842 q->flows[reduced_hash].dsthost = dsthost_idx;
849 /* helper functions : might be changed when/if skb use a standard list_head */
850 /* remove one skb from head of slot queue */
852 static struct sk_buff *dequeue_head(struct cake_flow *flow)
854 struct sk_buff *skb = flow->head;
857 flow->head = skb->next;
858 skb_mark_not_on_list(skb);
864 /* add skb to flow queue (tail add) */
866 static void flow_queue_add(struct cake_flow *flow, struct sk_buff *skb)
871 flow->tail->next = skb;
876 static struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
879 unsigned int offset = skb_network_offset(skb);
882 iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);
887 if (iph->version == 4 && iph->protocol == IPPROTO_IPV6)
888 return skb_header_pointer(skb, offset + iph->ihl * 4,
889 sizeof(struct ipv6hdr), buf);
891 else if (iph->version == 4)
894 else if (iph->version == 6)
895 return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
901 static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
902 void *buf, unsigned int bufsize)
904 unsigned int offset = skb_network_offset(skb);
905 const struct ipv6hdr *ipv6h;
906 const struct tcphdr *tcph;
907 const struct iphdr *iph;
908 struct ipv6hdr _ipv6h;
911 ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
916 if (ipv6h->version == 4) {
917 iph = (struct iphdr *)ipv6h;
918 offset += iph->ihl * 4;
920 /* special-case 6in4 tunnelling, as that is a common way to get
921 * v6 connectivity in the home
923 if (iph->protocol == IPPROTO_IPV6) {
924 ipv6h = skb_header_pointer(skb, offset,
925 sizeof(_ipv6h), &_ipv6h);
927 if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
930 offset += sizeof(struct ipv6hdr);
932 } else if (iph->protocol != IPPROTO_TCP) {
936 } else if (ipv6h->version == 6) {
937 if (ipv6h->nexthdr != IPPROTO_TCP)
940 offset += sizeof(struct ipv6hdr);
945 tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
949 return skb_header_pointer(skb, offset,
950 min(__tcp_hdrlen(tcph), bufsize), buf);
953 static const void *cake_get_tcpopt(const struct tcphdr *tcph,
954 int code, int *oplen)
956 /* inspired by tcp_parse_options in tcp_input.c */
957 int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
958 const u8 *ptr = (const u8 *)(tcph + 1);
964 if (opcode == TCPOPT_EOL)
966 if (opcode == TCPOPT_NOP) {
971 if (opsize < 2 || opsize > length)
974 if (opcode == code) {
986 /* Compare two SACK sequences. A sequence is considered greater if it SACKs more
987 * bytes than the other. In the case where both sequences ACKs bytes that the
988 * other doesn't, A is considered greater. DSACKs in A also makes A be
989 * considered greater.
991 * @return -1, 0 or 1 as normal compare functions
993 static int cake_tcph_sack_compare(const struct tcphdr *tcph_a,
994 const struct tcphdr *tcph_b)
996 const struct tcp_sack_block_wire *sack_a, *sack_b;
997 u32 ack_seq_a = ntohl(tcph_a->ack_seq);
998 u32 bytes_a = 0, bytes_b = 0;
999 int oplen_a, oplen_b;
1002 sack_a = cake_get_tcpopt(tcph_a, TCPOPT_SACK, &oplen_a);
1003 sack_b = cake_get_tcpopt(tcph_b, TCPOPT_SACK, &oplen_b);
1005 /* pointers point to option contents */
1006 oplen_a -= TCPOLEN_SACK_BASE;
1007 oplen_b -= TCPOLEN_SACK_BASE;
1009 if (sack_a && oplen_a >= sizeof(*sack_a) &&
1010 (!sack_b || oplen_b < sizeof(*sack_b)))
1012 else if (sack_b && oplen_b >= sizeof(*sack_b) &&
1013 (!sack_a || oplen_a < sizeof(*sack_a)))
1015 else if ((!sack_a || oplen_a < sizeof(*sack_a)) &&
1016 (!sack_b || oplen_b < sizeof(*sack_b)))
1019 while (oplen_a >= sizeof(*sack_a)) {
1020 const struct tcp_sack_block_wire *sack_tmp = sack_b;
1021 u32 start_a = get_unaligned_be32(&sack_a->start_seq);
1022 u32 end_a = get_unaligned_be32(&sack_a->end_seq);
1023 int oplen_tmp = oplen_b;
1026 /* DSACK; always considered greater to prevent dropping */
1027 if (before(start_a, ack_seq_a))
1030 bytes_a += end_a - start_a;
1032 while (oplen_tmp >= sizeof(*sack_tmp)) {
1033 u32 start_b = get_unaligned_be32(&sack_tmp->start_seq);
1034 u32 end_b = get_unaligned_be32(&sack_tmp->end_seq);
1036 /* first time through we count the total size */
1038 bytes_b += end_b - start_b;
1040 if (!after(start_b, start_a) && !before(end_b, end_a)) {
1045 oplen_tmp -= sizeof(*sack_tmp);
1052 oplen_a -= sizeof(*sack_a);
1057 /* If we made it this far, all ranges SACKed by A are covered by B, so
1058 * either the SACKs are equal, or B SACKs more bytes.
1060 return bytes_b > bytes_a ? 1 : 0;
1063 static void cake_tcph_get_tstamp(const struct tcphdr *tcph,
1064 u32 *tsval, u32 *tsecr)
1069 ptr = cake_get_tcpopt(tcph, TCPOPT_TIMESTAMP, &opsize);
1071 if (ptr && opsize == TCPOLEN_TIMESTAMP) {
1072 *tsval = get_unaligned_be32(ptr);
1073 *tsecr = get_unaligned_be32(ptr + 4);
1077 static bool cake_tcph_may_drop(const struct tcphdr *tcph,
1078 u32 tstamp_new, u32 tsecr_new)
1080 /* inspired by tcp_parse_options in tcp_input.c */
1081 int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
1082 const u8 *ptr = (const u8 *)(tcph + 1);
1085 /* 3 reserved flags must be unset to avoid future breakage
1087 * ECE/CWR are handled separately
1088 * All other flags URG/PSH/RST/SYN/FIN must be unset
1089 * 0x0FFF0000 = all TCP flags (confirm ACK=1, others zero)
1090 * 0x00C00000 = CWR/ECE (handled separately)
1091 * 0x0F3F0000 = 0x0FFF0000 & ~0x00C00000
1093 if (((tcp_flag_word(tcph) &
1094 cpu_to_be32(0x0F3F0000)) != TCP_FLAG_ACK))
1097 while (length > 0) {
1098 int opcode = *ptr++;
1101 if (opcode == TCPOPT_EOL)
1103 if (opcode == TCPOPT_NOP) {
1108 if (opsize < 2 || opsize > length)
1112 case TCPOPT_MD5SIG: /* doesn't influence state */
1115 case TCPOPT_SACK: /* stricter checking performed later */
1116 if (opsize % 8 != 2)
1120 case TCPOPT_TIMESTAMP:
1121 /* only drop timestamps lower than new */
1122 if (opsize != TCPOLEN_TIMESTAMP)
1124 tstamp = get_unaligned_be32(ptr);
1125 tsecr = get_unaligned_be32(ptr + 4);
1126 if (after(tstamp, tstamp_new) ||
1127 after(tsecr, tsecr_new))
1131 case TCPOPT_MSS: /* these should only be set on SYN */
1133 case TCPOPT_SACK_PERM:
1134 case TCPOPT_FASTOPEN:
1136 default: /* don't drop if any unknown options are present */
1147 static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
1148 struct cake_flow *flow)
1150 bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE;
1151 struct sk_buff *elig_ack = NULL, *elig_ack_prev = NULL;
1152 struct sk_buff *skb_check, *skb_prev = NULL;
1153 const struct ipv6hdr *ipv6h, *ipv6h_check;
1154 unsigned char _tcph[64], _tcph_check[64];
1155 const struct tcphdr *tcph, *tcph_check;
1156 const struct iphdr *iph, *iph_check;
1157 struct ipv6hdr _iph, _iph_check;
1158 const struct sk_buff *skb;
1159 int seglen, num_found = 0;
1160 u32 tstamp = 0, tsecr = 0;
1161 __be32 elig_flags = 0;
1164 /* no other possible ACKs to filter */
1165 if (flow->head == flow->tail)
1169 tcph = cake_get_tcphdr(skb, _tcph, sizeof(_tcph));
1170 iph = cake_get_iphdr(skb, &_iph);
1174 cake_tcph_get_tstamp(tcph, &tstamp, &tsecr);
1176 /* the 'triggering' packet need only have the ACK flag set.
1177 * also check that SYN is not set, as there won't be any previous ACKs.
1179 if ((tcp_flag_word(tcph) &
1180 (TCP_FLAG_ACK | TCP_FLAG_SYN)) != TCP_FLAG_ACK)
1183 /* the 'triggering' ACK is at the tail of the queue, we have already
1184 * returned if it is the only packet in the flow. loop through the rest
1185 * of the queue looking for pure ACKs with the same 5-tuple as the
1188 for (skb_check = flow->head;
1189 skb_check && skb_check != skb;
1190 skb_prev = skb_check, skb_check = skb_check->next) {
1191 iph_check = cake_get_iphdr(skb_check, &_iph_check);
1192 tcph_check = cake_get_tcphdr(skb_check, &_tcph_check,
1193 sizeof(_tcph_check));
1195 /* only TCP packets with matching 5-tuple are eligible, and only
1198 if (!tcph_check || iph->version != iph_check->version ||
1199 tcph_check->source != tcph->source ||
1200 tcph_check->dest != tcph->dest)
1203 if (iph_check->version == 4) {
1204 if (iph_check->saddr != iph->saddr ||
1205 iph_check->daddr != iph->daddr)
1208 seglen = ntohs(iph_check->tot_len) -
1209 (4 * iph_check->ihl);
1210 } else if (iph_check->version == 6) {
1211 ipv6h = (struct ipv6hdr *)iph;
1212 ipv6h_check = (struct ipv6hdr *)iph_check;
1214 if (ipv6_addr_cmp(&ipv6h_check->saddr, &ipv6h->saddr) ||
1215 ipv6_addr_cmp(&ipv6h_check->daddr, &ipv6h->daddr))
1218 seglen = ntohs(ipv6h_check->payload_len);
1220 WARN_ON(1); /* shouldn't happen */
1224 /* If the ECE/CWR flags changed from the previous eligible
1225 * packet in the same flow, we should no longer be dropping that
1226 * previous packet as this would lose information.
1228 if (elig_ack && (tcp_flag_word(tcph_check) &
1229 (TCP_FLAG_ECE | TCP_FLAG_CWR)) != elig_flags) {
1231 elig_ack_prev = NULL;
1235 /* Check TCP options and flags, don't drop ACKs with segment
1236 * data, and don't drop ACKs with a higher cumulative ACK
1237 * counter than the triggering packet. Check ACK seqno here to
1238 * avoid parsing SACK options of packets we are going to exclude
1241 if (!cake_tcph_may_drop(tcph_check, tstamp, tsecr) ||
1242 (seglen - __tcp_hdrlen(tcph_check)) != 0 ||
1243 after(ntohl(tcph_check->ack_seq), ntohl(tcph->ack_seq)))
1246 /* Check SACK options. The triggering packet must SACK more data
1247 * than the ACK under consideration, or SACK the same range but
1248 * have a larger cumulative ACK counter. The latter is a
1249 * pathological case, but is contained in the following check
1250 * anyway, just to be safe.
1252 sack_comp = cake_tcph_sack_compare(tcph_check, tcph);
1254 if (sack_comp < 0 ||
1255 (ntohl(tcph_check->ack_seq) == ntohl(tcph->ack_seq) &&
1259 /* At this point we have found an eligible pure ACK to drop; if
1260 * we are in aggressive mode, we are done. Otherwise, keep
1261 * searching unless this is the second eligible ACK we
1264 * Since we want to drop the ACK closest to the head of the queue,
1265 * save the first eligible ACK we find, even if we need to loop
1269 elig_ack = skb_check;
1270 elig_ack_prev = skb_prev;
1271 elig_flags = (tcp_flag_word(tcph_check)
1272 & (TCP_FLAG_ECE | TCP_FLAG_CWR));
1275 if (num_found++ > 0)
1279 /* We made it through the queue without finding two eligible ACKs. If
1280 * we found a single eligible ACK we can drop it in aggressive mode if
1281 * we can guarantee that this does not interfere with ECN flag
1282 * information. We ensure this by dropping it only if the enqueued
1283 * packet is consecutive with the eligible ACK, and their flags match.
1285 if (elig_ack && aggressive && elig_ack->next == skb &&
1286 (elig_flags == (tcp_flag_word(tcph) &
1287 (TCP_FLAG_ECE | TCP_FLAG_CWR))))
1294 elig_ack_prev->next = elig_ack->next;
1296 flow->head = elig_ack->next;
1298 skb_mark_not_on_list(elig_ack);
1303 static u64 cake_ewma(u64 avg, u64 sample, u32 shift)
1305 avg -= avg >> shift;
1306 avg += sample >> shift;
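/* i.e. avg <- avg * (1 - 2^-shift) + sample * 2^-shift using truncating
 * integer shifts; each call site picks the smoothing factor via 'shift'.
 */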
1310 static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
1312 if (q->rate_flags & CAKE_FLAG_OVERHEAD)
1315 if (q->max_netlen < len)
1316 q->max_netlen = len;
1317 if (q->min_netlen > len)
1318 q->min_netlen = len;
1320 len += q->rate_overhead;
1322 if (len < q->rate_mpu)
1325 if (q->atm_mode == CAKE_ATM_ATM) {
1329 } else if (q->atm_mode == CAKE_ATM_PTM) {
1330 /* Add one byte per 64 bytes or part thereof.
1331 * This is conservative and easier to calculate than the
1334 len += (len + 63) / 64;
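/* e.g. len == 1500 at this point gains (1500 + 63) / 64 == 24 bytes */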
1337 if (q->max_adjlen < len)
1338 q->max_adjlen = len;
1339 if (q->min_adjlen > len)
1340 q->min_adjlen = len;
1345 static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
1347 const struct skb_shared_info *shinfo = skb_shinfo(skb);
1348 unsigned int hdr_len, last_len = 0;
1349 u32 off = skb_network_offset(skb);
1350 u32 len = qdisc_pkt_len(skb);
1353 q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
1355 if (!shinfo->gso_size)
1356 return cake_calc_overhead(q, len, off);
1358 /* borrowed from qdisc_pkt_len_init() */
1359 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
1361 /* + transport layer */
1362 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
1364 const struct tcphdr *th;
1365 struct tcphdr _tcphdr;
1367 th = skb_header_pointer(skb, skb_transport_offset(skb),
1368 sizeof(_tcphdr), &_tcphdr);
1370 hdr_len += __tcp_hdrlen(th);
1372 struct udphdr _udphdr;
1374 if (skb_header_pointer(skb, skb_transport_offset(skb),
1375 sizeof(_udphdr), &_udphdr))
1376 hdr_len += sizeof(struct udphdr);
1379 if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
1380 segs = DIV_ROUND_UP(skb->len - hdr_len,
1383 segs = shinfo->gso_segs;
1385 len = shinfo->gso_size + hdr_len;
1386 last_len = skb->len - shinfo->gso_size * (segs - 1);
1388 return (cake_calc_overhead(q, len, off) * (segs - 1) +
1389 cake_calc_overhead(q, last_len, off));
1392 static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
1394 struct cake_heap_entry ii = q->overflow_heap[i];
1395 struct cake_heap_entry jj = q->overflow_heap[j];
1397 q->overflow_heap[i] = jj;
1398 q->overflow_heap[j] = ii;
1400 q->tins[ii.t].overflow_idx[ii.b] = j;
1401 q->tins[jj.t].overflow_idx[jj.b] = i;
1404 static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i)
1406 struct cake_heap_entry ii = q->overflow_heap[i];
1408 return q->tins[ii.t].backlogs[ii.b];
1411 static void cake_heapify(struct cake_sched_data *q, u16 i)
1413 static const u32 a = CAKE_MAX_TINS * CAKE_QUEUES;
1414 u32 mb = cake_heap_get_backlog(q, i);
1422 u32 lb = cake_heap_get_backlog(q, l);
1431 u32 rb = cake_heap_get_backlog(q, r);
1440 cake_heap_swap(q, i, m);
1448 static void cake_heapify_up(struct cake_sched_data *q, u16 i)
1450 while (i > 0 && i < CAKE_MAX_TINS * CAKE_QUEUES) {
1451 u16 p = (i - 1) >> 1;
1452 u32 ib = cake_heap_get_backlog(q, i);
1453 u32 pb = cake_heap_get_backlog(q, p);
1456 cake_heap_swap(q, i, p);
1464 static int cake_advance_shaper(struct cake_sched_data *q,
1465 struct cake_tin_data *b,
1466 struct sk_buff *skb,
1467 ktime_t now, bool drop)
1469 u32 len = get_cobalt_cb(skb)->adjusted_len;
1471 /* charge packet bandwidth to this tin
1472 * and to the global shaper.
1475 u64 tin_dur = (len * b->tin_rate_ns) >> b->tin_rate_shft;
1476 u64 global_dur = (len * q->rate_ns) >> q->rate_shft;
1477 u64 failsafe_dur = global_dur + (global_dur >> 1);
1479 if (ktime_before(b->time_next_packet, now))
1480 b->time_next_packet = ktime_add_ns(b->time_next_packet,
1483 else if (ktime_before(b->time_next_packet,
1484 ktime_add_ns(now, tin_dur)))
1485 b->time_next_packet = ktime_add_ns(now, tin_dur);
1487 q->time_next_packet = ktime_add_ns(q->time_next_packet,
1490 q->failsafe_next_packet = \
1491 ktime_add_ns(q->failsafe_next_packet,
1497 static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
1499 struct cake_sched_data *q = qdisc_priv(sch);
1500 ktime_t now = ktime_get();
1501 u32 idx = 0, tin = 0, len;
1502 struct cake_heap_entry qq;
1503 struct cake_tin_data *b;
1504 struct cake_flow *flow;
1505 struct sk_buff *skb;
1507 if (!q->overflow_timeout) {
1509 /* Build fresh max-heap */
1510 for (i = CAKE_MAX_TINS * CAKE_QUEUES / 2; i >= 0; i--)
1513 q->overflow_timeout = 65535;
1515 /* select longest queue for pruning */
1516 qq = q->overflow_heap[0];
1521 flow = &b->flows[idx];
1522 skb = dequeue_head(flow);
1523 if (unlikely(!skb)) {
1524 /* heap has gone wrong, rebuild it next time */
1525 q->overflow_timeout = 0;
1526 return idx + (tin << 16);
1529 if (cobalt_queue_full(&flow->cvars, &b->cparams, now))
1530 b->unresponsive_flow_count++;
1532 len = qdisc_pkt_len(skb);
1533 q->buffer_used -= skb->truesize;
1534 b->backlogs[idx] -= len;
1535 b->tin_backlog -= len;
1536 sch->qstats.backlog -= len;
1537 qdisc_tree_reduce_backlog(sch, 1, len);
1541 sch->qstats.drops++;
1543 if (q->rate_flags & CAKE_FLAG_INGRESS)
1544 cake_advance_shaper(q, b, skb, now, true);
1546 __qdisc_drop(skb, to_free);
1551 return idx + (tin << 16);
1554 static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
1556 const int offset = skb_network_offset(skb);
1560 switch (tc_skb_protocol(skb)) {
1561 case htons(ETH_P_IP):
1562 buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
1566 /* ToS is in the second byte of iphdr */
1567 dscp = ipv4_get_dsfield((struct iphdr *)buf) >> 2;
1570 const int wlen = offset + sizeof(struct iphdr);
1572 if (!pskb_may_pull(skb, wlen) ||
1573 skb_try_make_writable(skb, wlen))
1576 ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
1581 case htons(ETH_P_IPV6):
1582 buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
1586 /* Traffic class is in the first and second bytes of ipv6hdr */
1587 dscp = ipv6_get_dsfield((struct ipv6hdr *)buf) >> 2;
1590 const int wlen = offset + sizeof(struct ipv6hdr);
1592 if (!pskb_may_pull(skb, wlen) ||
1593 skb_try_make_writable(skb, wlen))
1596 ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
1601 case htons(ETH_P_ARP):
1602 return 0x38; /* CS7 - Net Control */
1605 /* If there is no Diffserv field, treat as best-effort */
1610 static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
1611 struct sk_buff *skb)
1613 struct cake_sched_data *q = qdisc_priv(sch);
1617 /* Tin selection: Default to diffserv-based selection, allow overriding
1618 * using firewall marks or skb->priority.
1620 dscp = cake_handle_diffserv(skb,
1621 q->rate_flags & CAKE_FLAG_WASH);
1622 mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
1624 if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
1627 else if (mark && mark <= q->tin_cnt)
1628 tin = q->tin_order[mark - 1];
1630 else if (TC_H_MAJ(skb->priority) == sch->handle &&
1631 TC_H_MIN(skb->priority) > 0 &&
1632 TC_H_MIN(skb->priority) <= q->tin_cnt)
1633 tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
1636 tin = q->tin_index[dscp];
1638 if (unlikely(tin >= q->tin_cnt))
1642 return &q->tins[tin];
1645 static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
1646 struct sk_buff *skb, int flow_mode, int *qerr)
1648 struct cake_sched_data *q = qdisc_priv(sch);
1649 struct tcf_proto *filter;
1650 struct tcf_result res;
1651 u16 flow = 0, host = 0;
1654 filter = rcu_dereference_bh(q->filter_list);
1658 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
1659 result = tcf_classify(skb, filter, &res, false);
1662 #ifdef CONFIG_NET_CLS_ACT
1667 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
1673 if (TC_H_MIN(res.classid) <= CAKE_QUEUES)
1674 flow = TC_H_MIN(res.classid);
1675 if (TC_H_MAJ(res.classid) <= (CAKE_QUEUES << 16))
1676 host = TC_H_MAJ(res.classid) >> 16;
1679 *t = cake_select_tin(sch, skb);
1680 return cake_hash(*t, skb, flow_mode, flow, host) + 1;
1683 static void cake_reconfigure(struct Qdisc *sch);
1685 static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1686 struct sk_buff **to_free)
1688 struct cake_sched_data *q = qdisc_priv(sch);
1689 int len = qdisc_pkt_len(skb);
1690 int uninitialized_var(ret);
1691 struct sk_buff *ack = NULL;
1692 ktime_t now = ktime_get();
1693 struct cake_tin_data *b;
1694 struct cake_flow *flow;
1697 /* choose flow to insert into */
1698 idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
1700 if (ret & __NET_XMIT_BYPASS)
1701 qdisc_qstats_drop(sch);
1702 __qdisc_drop(skb, to_free);
1706 flow = &b->flows[idx];
1708 /* ensure shaper state isn't stale */
1709 if (!b->tin_backlog) {
1710 if (ktime_before(b->time_next_packet, now))
1711 b->time_next_packet = now;
1714 if (ktime_before(q->time_next_packet, now)) {
1715 q->failsafe_next_packet = now;
1716 q->time_next_packet = now;
1717 } else if (ktime_after(q->time_next_packet, now) &&
1718 ktime_after(q->failsafe_next_packet, now)) {
1720 min(ktime_to_ns(q->time_next_packet),
1722 q->failsafe_next_packet));
1723 sch->qstats.overlimits++;
1724 qdisc_watchdog_schedule_ns(&q->watchdog, next);
1729 if (unlikely(len > b->max_skblen))
1730 b->max_skblen = len;
1732 if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
1733 struct sk_buff *segs, *nskb;
1734 netdev_features_t features = netif_skb_features(skb);
1735 unsigned int slen = 0, numsegs = 0;
1737 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
1738 if (IS_ERR_OR_NULL(segs))
1739 return qdisc_drop(skb, sch, to_free);
1741 skb_list_walk_safe(segs, segs, nskb) {
1742 skb_mark_not_on_list(segs);
1743 qdisc_skb_cb(segs)->pkt_len = segs->len;
1744 cobalt_set_enqueue_time(segs, now);
1745 get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
1747 flow_queue_add(flow, segs);
1752 q->buffer_used += segs->truesize;
1758 b->backlogs[idx] += slen;
1759 b->tin_backlog += slen;
1760 sch->qstats.backlog += slen;
1761 q->avg_window_bytes += slen;
1763 qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen);
1767 cobalt_set_enqueue_time(skb, now);
1768 get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
1769 flow_queue_add(flow, skb);
1772 ack = cake_ack_filter(q, flow);
1776 sch->qstats.drops++;
1777 b->bytes += qdisc_pkt_len(ack);
1778 len -= qdisc_pkt_len(ack);
1779 q->buffer_used += skb->truesize - ack->truesize;
1780 if (q->rate_flags & CAKE_FLAG_INGRESS)
1781 cake_advance_shaper(q, b, ack, now, true);
1783 qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
1787 q->buffer_used += skb->truesize;
1793 b->backlogs[idx] += len;
1794 b->tin_backlog += len;
1795 sch->qstats.backlog += len;
1796 q->avg_window_bytes += len;
1799 if (q->overflow_timeout)
1800 cake_heapify_up(q, b->overflow_idx[idx]);
1802 /* incoming bandwidth capacity estimate */
1803 if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
1804 u64 packet_interval = \
1805 ktime_to_ns(ktime_sub(now, q->last_packet_time));
1807 if (packet_interval > NSEC_PER_SEC)
1808 packet_interval = NSEC_PER_SEC;
1810 /* filter out short-term bursts, eg. wifi aggregation */
1811 q->avg_packet_interval = \
1812 cake_ewma(q->avg_packet_interval,
1814 (packet_interval > q->avg_packet_interval ?
1817 q->last_packet_time = now;
1819 if (packet_interval > q->avg_packet_interval) {
1820 u64 window_interval = \
1821 ktime_to_ns(ktime_sub(now,
1822 q->avg_window_begin));
1823 u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
1825 b = div64_u64(b, window_interval);
1826 q->avg_peak_bandwidth =
1827 cake_ewma(q->avg_peak_bandwidth, b,
1828 b > q->avg_peak_bandwidth ? 2 : 8);
1829 q->avg_window_bytes = 0;
1830 q->avg_window_begin = now;
1832 if (ktime_after(now,
1833 ktime_add_ms(q->last_reconfig_time,
1835 q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
1836 cake_reconfigure(sch);
1840 q->avg_window_bytes = 0;
1841 q->last_packet_time = now;
1845 if (!flow->set || flow->set == CAKE_SET_DECAYING) {
1846 struct cake_host *srchost = &b->hosts[flow->srchost];
1847 struct cake_host *dsthost = &b->hosts[flow->dsthost];
1851 list_add_tail(&flow->flowchain, &b->new_flows);
1853 b->decaying_flow_count--;
1854 list_move_tail(&flow->flowchain, &b->new_flows);
1856 flow->set = CAKE_SET_SPARSE;
1857 b->sparse_flow_count++;
1859 if (cake_dsrc(q->flow_mode))
1860 host_load = max(host_load, srchost->srchost_bulk_flow_count);
1862 if (cake_ddst(q->flow_mode))
1863 host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
1865 flow->deficit = (b->flow_quantum *
1866 quantum_div[host_load]) >> 16;
1867 } else if (flow->set == CAKE_SET_SPARSE_WAIT) {
1868 struct cake_host *srchost = &b->hosts[flow->srchost];
1869 struct cake_host *dsthost = &b->hosts[flow->dsthost];
1871 /* this flow was empty, accounted as a sparse flow, but actually
1872 * in the bulk rotation.
1874 flow->set = CAKE_SET_BULK;
1875 b->sparse_flow_count--;
1876 b->bulk_flow_count++;
1878 if (cake_dsrc(q->flow_mode))
1879 srchost->srchost_bulk_flow_count++;
1881 if (cake_ddst(q->flow_mode))
1882 dsthost->dsthost_bulk_flow_count++;
1886 if (q->buffer_used > q->buffer_max_used)
1887 q->buffer_max_used = q->buffer_used;
1889 if (q->buffer_used > q->buffer_limit) {
1892 while (q->buffer_used > q->buffer_limit) {
1894 cake_drop(sch, to_free);
1896 b->drop_overlimit += dropped;
1898 return NET_XMIT_SUCCESS;
1901 static struct sk_buff *cake_dequeue_one(struct Qdisc *sch)
1903 struct cake_sched_data *q = qdisc_priv(sch);
1904 struct cake_tin_data *b = &q->tins[q->cur_tin];
1905 struct cake_flow *flow = &b->flows[q->cur_flow];
1906 struct sk_buff *skb = NULL;
1910 skb = dequeue_head(flow);
1911 len = qdisc_pkt_len(skb);
1912 b->backlogs[q->cur_flow] -= len;
1913 b->tin_backlog -= len;
1914 sch->qstats.backlog -= len;
1915 q->buffer_used -= skb->truesize;
1918 if (q->overflow_timeout)
1919 cake_heapify(q, b->overflow_idx[q->cur_flow]);
1924 /* Discard leftover packets from a tin no longer in use. */
1925 static void cake_clear_tin(struct Qdisc *sch, u16 tin)
1927 struct cake_sched_data *q = qdisc_priv(sch);
1928 struct sk_buff *skb;
1931 for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++)
1932 while (!!(skb = cake_dequeue_one(sch)))
1936 static struct sk_buff *cake_dequeue(struct Qdisc *sch)
1938 struct cake_sched_data *q = qdisc_priv(sch);
1939 struct cake_tin_data *b = &q->tins[q->cur_tin];
1940 struct cake_host *srchost, *dsthost;
1941 ktime_t now = ktime_get();
1942 struct cake_flow *flow;
1943 struct list_head *head;
1944 bool first_flow = true;
1945 struct sk_buff *skb;
1954 /* global hard shaper */
1955 if (ktime_after(q->time_next_packet, now) &&
1956 ktime_after(q->failsafe_next_packet, now)) {
1957 u64 next = min(ktime_to_ns(q->time_next_packet),
1958 ktime_to_ns(q->failsafe_next_packet));
1960 sch->qstats.overlimits++;
1961 qdisc_watchdog_schedule_ns(&q->watchdog, next);
1965 /* Choose a class to work on. */
1967 /* In unlimited mode, can't rely on shaper timings, just balance
1970 bool wrapped = false, empty = true;
1972 while (b->tin_deficit < 0 ||
1973 !(b->sparse_flow_count + b->bulk_flow_count)) {
1974 if (b->tin_deficit <= 0)
1975 b->tin_deficit += b->tin_quantum;
1976 if (b->sparse_flow_count + b->bulk_flow_count)
1981 if (q->cur_tin >= q->tin_cnt) {
1986 /* It's possible for q->qlen to be
1987 * nonzero when we actually have no
1998 /* In shaped mode, choose:
1999 * - Highest-priority tin with queue and meeting schedule, or
2000 * - The earliest-scheduled tin with queue.
2002 ktime_t best_time = KTIME_MAX;
2003 int tin, best_tin = 0;
2005 for (tin = 0; tin < q->tin_cnt; tin++) {
2007 if ((b->sparse_flow_count + b->bulk_flow_count) > 0) {
2008 ktime_t time_to_pkt = \
2009 ktime_sub(b->time_next_packet, now);
2011 if (ktime_to_ns(time_to_pkt) <= 0 ||
2012 ktime_compare(time_to_pkt,
2014 best_time = time_to_pkt;
2020 q->cur_tin = best_tin;
2021 b = q->tins + best_tin;
2023 /* No point in going further if no packets to deliver. */
2024 if (unlikely(!(b->sparse_flow_count + b->bulk_flow_count)))
2029 /* service this class */
2030 head = &b->decaying_flows;
2031 if (!first_flow || list_empty(head)) {
2032 head = &b->new_flows;
2033 if (list_empty(head)) {
2034 head = &b->old_flows;
2035 if (unlikely(list_empty(head))) {
2036 head = &b->decaying_flows;
2037 if (unlikely(list_empty(head)))
2042 flow = list_first_entry(head, struct cake_flow, flowchain);
2043 q->cur_flow = flow - b->flows;
2046 /* triple isolation (modified DRR++) */
2047 srchost = &b->hosts[flow->srchost];
2048 dsthost = &b->hosts[flow->dsthost];
2051 /* flow isolation (DRR++) */
2052 if (flow->deficit <= 0) {
2053 /* Keep all flows with deficits out of the sparse and decaying
2054 * rotations. No non-empty flow can go into the decaying
2055 * rotation, so they can't get deficits
2057 if (flow->set == CAKE_SET_SPARSE) {
2059 b->sparse_flow_count--;
2060 b->bulk_flow_count++;
2062 if (cake_dsrc(q->flow_mode))
2063 srchost->srchost_bulk_flow_count++;
2065 if (cake_ddst(q->flow_mode))
2066 dsthost->dsthost_bulk_flow_count++;
2068 flow->set = CAKE_SET_BULK;
2070 /* we've moved it to the bulk rotation for
2071 * correct deficit accounting but we still want
2072 * to count it as a sparse flow, not a bulk one.
2074 flow->set = CAKE_SET_SPARSE_WAIT;
2078 if (cake_dsrc(q->flow_mode))
2079 host_load = max(host_load, srchost->srchost_bulk_flow_count);
2081 if (cake_ddst(q->flow_mode))
2082 host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
2084 WARN_ON(host_load > CAKE_QUEUES);
2086 /* The shifted prandom_u32() is a way to apply dithering to
2087 * avoid accumulating roundoff errors
2089 flow->deficit += (b->flow_quantum * quantum_div[host_load] +
2090 (prandom_u32() >> 16)) >> 16;
2091 list_move_tail(&flow->flowchain, &b->old_flows);
2096 /* Retrieve a packet via the AQM */
2098 skb = cake_dequeue_one(sch);
2100 /* this queue was actually empty */
2101 if (cobalt_queue_empty(&flow->cvars, &b->cparams, now))
2102 b->unresponsive_flow_count--;
2104 if (flow->cvars.p_drop || flow->cvars.count ||
2105 ktime_before(now, flow->cvars.drop_next)) {
2106 /* keep in the flowchain until the state has
2109 list_move_tail(&flow->flowchain,
2110 &b->decaying_flows);
2111 if (flow->set == CAKE_SET_BULK) {
2112 b->bulk_flow_count--;
2114 if (cake_dsrc(q->flow_mode))
2115 srchost->srchost_bulk_flow_count--;
2117 if (cake_ddst(q->flow_mode))
2118 dsthost->dsthost_bulk_flow_count--;
2120 b->decaying_flow_count++;
2121 } else if (flow->set == CAKE_SET_SPARSE ||
2122 flow->set == CAKE_SET_SPARSE_WAIT) {
2123 b->sparse_flow_count--;
2124 b->decaying_flow_count++;
2126 flow->set = CAKE_SET_DECAYING;
2128 /* remove empty queue from the flowchain */
2129 list_del_init(&flow->flowchain);
2130 if (flow->set == CAKE_SET_SPARSE ||
2131 flow->set == CAKE_SET_SPARSE_WAIT)
2132 b->sparse_flow_count--;
2133 else if (flow->set == CAKE_SET_BULK) {
2134 b->bulk_flow_count--;
2136 if (cake_dsrc(q->flow_mode))
2137 srchost->srchost_bulk_flow_count--;
2139 if (cake_ddst(q->flow_mode))
2140 dsthost->dsthost_bulk_flow_count--;
2143 b->decaying_flow_count--;
2145 flow->set = CAKE_SET_NONE;
2150 /* Last packet in queue may be marked, shouldn't be dropped */
2151 if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb,
2152 (b->bulk_flow_count *
2154 CAKE_FLAG_INGRESS))) ||
2158 /* drop this packet, get another one */
2159 if (q->rate_flags & CAKE_FLAG_INGRESS) {
2160 len = cake_advance_shaper(q, b, skb,
2162 flow->deficit -= len;
2163 b->tin_deficit -= len;
2167 qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
2168 qdisc_qstats_drop(sch);
2170 if (q->rate_flags & CAKE_FLAG_INGRESS)
2174 b->tin_ecn_mark += !!flow->cvars.ecn_marked;
2175 qdisc_bstats_update(sch, skb);
2177 /* collect delay stats */
2178 delay = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
2179 b->avge_delay = cake_ewma(b->avge_delay, delay, 8);
2180 b->peak_delay = cake_ewma(b->peak_delay, delay,
2181 delay > b->peak_delay ? 2 : 8);
2182 b->base_delay = cake_ewma(b->base_delay, delay,
2183 delay < b->base_delay ? 2 : 8);
2185 len = cake_advance_shaper(q, b, skb, now, false);
2186 flow->deficit -= len;
2187 b->tin_deficit -= len;
2189 if (ktime_after(q->time_next_packet, now) && sch->q.qlen) {
2190 u64 next = min(ktime_to_ns(q->time_next_packet),
2191 ktime_to_ns(q->failsafe_next_packet));
2193 qdisc_watchdog_schedule_ns(&q->watchdog, next);
2194 } else if (!sch->q.qlen) {
2197 for (i = 0; i < q->tin_cnt; i++) {
2198 if (q->tins[i].decaying_flow_count) {
2201 q->tins[i].cparams.target);
2203 qdisc_watchdog_schedule_ns(&q->watchdog,
2210 if (q->overflow_timeout)
2211 q->overflow_timeout--;
2216 static void cake_reset(struct Qdisc *sch)
2220 for (c = 0; c < CAKE_MAX_TINS; c++)
2221 cake_clear_tin(sch, c);
2224 static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = {
2225 [TCA_CAKE_BASE_RATE64] = { .type = NLA_U64 },
2226 [TCA_CAKE_DIFFSERV_MODE] = { .type = NLA_U32 },
2227 [TCA_CAKE_ATM] = { .type = NLA_U32 },
2228 [TCA_CAKE_FLOW_MODE] = { .type = NLA_U32 },
2229 [TCA_CAKE_OVERHEAD] = { .type = NLA_S32 },
2230 [TCA_CAKE_RTT] = { .type = NLA_U32 },
2231 [TCA_CAKE_TARGET] = { .type = NLA_U32 },
2232 [TCA_CAKE_AUTORATE] = { .type = NLA_U32 },
2233 [TCA_CAKE_MEMORY] = { .type = NLA_U32 },
2234 [TCA_CAKE_NAT] = { .type = NLA_U32 },
2235 [TCA_CAKE_RAW] = { .type = NLA_U32 },
2236 [TCA_CAKE_WASH] = { .type = NLA_U32 },
2237 [TCA_CAKE_MPU] = { .type = NLA_U32 },
2238 [TCA_CAKE_INGRESS] = { .type = NLA_U32 },
2239 [TCA_CAKE_ACK_FILTER] = { .type = NLA_U32 },
2240 [TCA_CAKE_SPLIT_GSO] = { .type = NLA_U32 },
2241 [TCA_CAKE_FWMARK] = { .type = NLA_U32 },
2244 static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu,
2245 u64 target_ns, u64 rtt_est_ns)
2247 /* convert byte-rate into time-per-byte
2248 * so it will always unwedge in reasonable time.
2250 static const u64 MIN_RATE = 64;
2251 u32 byte_target = mtu;
2256 b->flow_quantum = 1514;
2258 b->flow_quantum = max(min(rate >> 12, 1514ULL), 300ULL);
2260 rate_ns = ((u64)NSEC_PER_SEC) << rate_shft;
2261 rate_ns = div64_u64(rate_ns, max(MIN_RATE, rate));
2262 while (!!(rate_ns >> 34)) {
2266 } /* else unlimited, ie. zero delay */
2268 b->tin_rate_bps = rate;
2269 b->tin_rate_ns = rate_ns;
2270 b->tin_rate_shft = rate_shft;
2272 byte_target_ns = (byte_target * rate_ns) >> rate_shft;
2274 b->cparams.target = max((byte_target_ns * 3) / 2, target_ns);
2275 b->cparams.interval = max(rtt_est_ns +
2276 b->cparams.target - target_ns,
2277 b->cparams.target * 2);
2278 b->cparams.mtu_time = byte_target_ns;
2279 b->cparams.p_inc = 1 << 24; /* 1/256 */
2280 b->cparams.p_dec = 1 << 20; /* 1/4096 */
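/* This is the auto-tuning referred to in the header comment: at low rates
 * the 1.5 * MTU serialisation delay in byte_target_ns exceeds the configured
 * target, so both target and interval grow with it; at typical rates the
 * configured values (by default 5 ms and 100 ms respectively) win the max()
 * calls above.
 */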
2283 static int cake_config_besteffort(struct Qdisc *sch)
2285 struct cake_sched_data *q = qdisc_priv(sch);
2286 struct cake_tin_data *b = &q->tins[0];
2287 u32 mtu = psched_mtu(qdisc_dev(sch));
2288 u64 rate = q->rate_bps;
2292 q->tin_index = besteffort;
2293 q->tin_order = normal_order;
2295 cake_set_rate(b, rate, mtu,
2296 us_to_ns(q->target), us_to_ns(q->interval));
2297 b->tin_quantum = 65535;
2302 static int cake_config_precedence(struct Qdisc *sch)
2304 /* convert high-level (user visible) parameters into internal format */
2305 struct cake_sched_data *q = qdisc_priv(sch);
2306 u32 mtu = psched_mtu(qdisc_dev(sch));
2307 u64 rate = q->rate_bps;
2312 q->tin_index = precedence;
2313 q->tin_order = normal_order;
2315 for (i = 0; i < q->tin_cnt; i++) {
2316 struct cake_tin_data *b = &q->tins[i];
2318 cake_set_rate(b, rate, mtu, us_to_ns(q->target),
2319 us_to_ns(q->interval));
2321 b->tin_quantum = max_t(u16, 1U, quantum);
2323 /* calculate next class's parameters */
2334 /* List of known Diffserv codepoints:
2336 * Least Effort (CS1)
2338 * Max Reliability & LLT "Lo" (TOS1)
2339 * Max Throughput (TOS2)
2342 * Assured Forwarding 1 (AF1x) - x3
2343 * Assured Forwarding 2 (AF2x) - x3
2344 * Assured Forwarding 3 (AF3x) - x3
2345 * Assured Forwarding 4 (AF4x) - x3
2346 * Precedence Class 2 (CS2)
2347 * Precedence Class 3 (CS3)
2348 * Precedence Class 4 (CS4)
2349 * Precedence Class 5 (CS5)
2350 * Precedence Class 6 (CS6)
2351 * Precedence Class 7 (CS7)
2353 * Expedited Forwarding (EF)
2355 * Total 25 codepoints.
2358 /* List of traffic classes in RFC 4594:
2359 * (roughly descending order of contended priority)
2360 * (roughly ascending order of uncontended throughput)
2362 * Network Control (CS6,CS7) - routing traffic
2363 * Telephony (EF,VA) - aka. VoIP streams
2364 * Signalling (CS5) - VoIP setup
2365 * Multimedia Conferencing (AF4x) - aka. video calls
2366 * Realtime Interactive (CS4) - eg. games
2367 * Multimedia Streaming (AF3x) - eg. YouTube, NetFlix, Twitch
2368 * Broadcast Video (CS3)
2369 * Low Latency Data (AF2x,TOS4) - eg. database
2370 * Ops, Admin, Management (CS2,TOS1) - eg. ssh
2371 * Standard Service (CS0 & unrecognised codepoints)
2372 * High Throughput Data (AF1x,TOS2) - eg. web traffic
2373 * Low Priority Data (CS1) - eg. BitTorrent
2375 * Total 12 traffic classes.
2378 static int cake_config_diffserv8(struct Qdisc *sch)
2380 /* Pruned list of traffic classes for typical applications:
2382 * Network Control (CS6, CS7)
2383 * Minimum Latency (EF, VA, CS5, CS4)
2384 * Interactive Shell (CS2, TOS1)
2385 * Low Latency Transactions (AF2x, TOS4)
2386 * Video Streaming (AF4x, AF3x, CS3)
2387 * Bog Standard (CS0 etc.)
2388 * High Throughput (AF1x, TOS2)
2389 * Background Traffic (CS1)
2391 * Total 8 traffic classes.
	struct cake_sched_data *q = qdisc_priv(sch);
	u32 mtu = psched_mtu(qdisc_dev(sch));
	u64 rate = q->rate_bps;
	u32 quantum = 256;
	u32 i;
	q->tin_cnt = 8;

	/* codepoint to class mapping */
	q->tin_index = diffserv8;
	q->tin_order = normal_order;

	/* class characteristics */
	for (i = 0; i < q->tin_cnt; i++) {
		struct cake_tin_data *b = &q->tins[i];

		cake_set_rate(b, rate, mtu, us_to_ns(q->target),
			      us_to_ns(q->interval));
		b->tin_quantum = max_t(u16, 1U, quantum);

		/* calculate next class's parameters */
		rate  *= 7;
		rate >>= 3;
		quantum  *= 7;
		quantum >>= 3;
	}
	return 0;
}
static int cake_config_diffserv4(struct Qdisc *sch)
{
/*  Further pruned list of traffic classes for four-class system:
 *
 *	    Latency Sensitive  (CS7, CS6, EF, VA, CS5, CS4)
 *	    Streaming Media    (AF4x, AF3x, CS3, AF2x, TOS4, CS2, TOS1)
 *	    Best Effort        (CS0, AF1x, TOS2, and those not specified)
 *	    Background Traffic (CS1)
 *
 *		Total 4 traffic classes.
 */
	struct cake_sched_data *q = qdisc_priv(sch);
	u32 mtu = psched_mtu(qdisc_dev(sch));
	u64 rate = q->rate_bps;
	u32 quantum = 1024;

	q->tin_cnt = 4;

	/* codepoint to class mapping */
	q->tin_index = diffserv4;
	q->tin_order = bulk_order;

	/* class characteristics */
	cake_set_rate(&q->tins[0], rate, mtu,
		      us_to_ns(q->target), us_to_ns(q->interval));
	cake_set_rate(&q->tins[1], rate >> 4, mtu,
		      us_to_ns(q->target), us_to_ns(q->interval));
	cake_set_rate(&q->tins[2], rate >> 1, mtu,
		      us_to_ns(q->target), us_to_ns(q->interval));
	cake_set_rate(&q->tins[3], rate >> 2, mtu,
		      us_to_ns(q->target), us_to_ns(q->interval));

	/* bandwidth-sharing weights */
	q->tins[0].tin_quantum = quantum;
	q->tins[1].tin_quantum = quantum >> 4;
	q->tins[2].tin_quantum = quantum >> 1;
	q->tins[3].tin_quantum = quantum >> 2;

	return 0;
}
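/* Illustrative arithmetic (added; not in the original source): with a
 * 100 Mbit/s base rate the four tins above get bandwidth-sharing
 * thresholds of 100, 6.25, 50 and 25 Mbit/s (rate, rate >> 4, rate >> 1,
 * rate >> 2), which per the diffserv4[] mapping correspond to Best
 * Effort, Background, Streaming Media and Latency Sensitive traffic
 * respectively.  The DRR quanta follow the same 16:1:8:4 ratios, so a tin
 * pushed past its threshold falls back to a bandwidth-based weight as
 * described in the header comment.
 */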
static int cake_config_diffserv3(struct Qdisc *sch)
{
/*  Simplified Diffserv structure with 3 tins.
 *		Low Priority		(CS1)
 *		Best Effort
 *		Latency Sensitive	(TOS4, VA, EF, CS6, CS7)
 */
	struct cake_sched_data *q = qdisc_priv(sch);
	u32 mtu = psched_mtu(qdisc_dev(sch));
	u64 rate = q->rate_bps;
	u32 quantum = 1024;

	q->tin_cnt = 3;

	/* codepoint to class mapping */
	q->tin_index = diffserv3;
	q->tin_order = bulk_order;

	/* class characteristics */
	cake_set_rate(&q->tins[0], rate, mtu,
		      us_to_ns(q->target), us_to_ns(q->interval));
	cake_set_rate(&q->tins[1], rate >> 4, mtu,
		      us_to_ns(q->target), us_to_ns(q->interval));
	cake_set_rate(&q->tins[2], rate >> 2, mtu,
		      us_to_ns(q->target), us_to_ns(q->interval));

	/* bandwidth-sharing weights */
	q->tins[0].tin_quantum = quantum;
	q->tins[1].tin_quantum = quantum >> 4;
	q->tins[2].tin_quantum = quantum >> 2;

	return 0;
}
static void cake_reconfigure(struct Qdisc *sch)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	int c, ft;

	switch (q->tin_mode) {
	case CAKE_DIFFSERV_BESTEFFORT:
		ft = cake_config_besteffort(sch);
		break;
	case CAKE_DIFFSERV_PRECEDENCE:
		ft = cake_config_precedence(sch);
		break;
	case CAKE_DIFFSERV_DIFFSERV8:
		ft = cake_config_diffserv8(sch);
		break;
	case CAKE_DIFFSERV_DIFFSERV4:
		ft = cake_config_diffserv4(sch);
		break;
	case CAKE_DIFFSERV_DIFFSERV3:
	default:
		ft = cake_config_diffserv3(sch);
		break;
	}

	for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) {
		cake_clear_tin(sch, c);
		q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time;
	}

	q->rate_ns   = q->tins[ft].tin_rate_ns;
	q->rate_shft = q->tins[ft].tin_rate_shft;
	if (q->buffer_config_limit) {
		q->buffer_limit = q->buffer_config_limit;
	} else if (q->rate_bps) {
		u64 t = q->rate_bps * q->interval;

		do_div(t, USEC_PER_SEC / 4);
		q->buffer_limit = max_t(u32, t, 4U << 20);
	} else {
		q->buffer_limit = ~0;
	}

	sch->flags &= ~TCQ_F_CAN_BYPASS;

	q->buffer_limit = min(q->buffer_limit,
			      max(sch->limit * psched_mtu(qdisc_dev(sch)),
				  q->buffer_config_limit));
}
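/* Worked example (added for illustration): when no explicit memory limit is
 * configured but a shaping rate is set, the middle branch above sizes the
 * buffer to four intervals' worth of traffic at the configured rate
 * (t = rate * interval / (USEC_PER_SEC / 4)), but never less than 4 MiB
 * (4U << 20 = 4,194,304 bytes).  The final min()/max() then caps the result
 * so it cannot exceed the larger of the qdisc's packet limit times the MTU
 * and the explicitly configured limit.
 */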
static int cake_change(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CAKE_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CAKE_MAX, opt, cake_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_CAKE_NAT]) {
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
		q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
		q->flow_mode |= CAKE_FLOW_NAT_FLAG *
			!!nla_get_u32(tb[TCA_CAKE_NAT]);
#else
		NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_NAT],
				    "No conntrack support in kernel");
		return -EOPNOTSUPP;
#endif
	}
	if (tb[TCA_CAKE_BASE_RATE64])
		q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]);

	if (tb[TCA_CAKE_DIFFSERV_MODE])
		q->tin_mode = nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]);

	if (tb[TCA_CAKE_WASH]) {
		if (!!nla_get_u32(tb[TCA_CAKE_WASH]))
			q->rate_flags |= CAKE_FLAG_WASH;
		else
			q->rate_flags &= ~CAKE_FLAG_WASH;
	}

	if (tb[TCA_CAKE_FLOW_MODE])
		q->flow_mode = ((q->flow_mode & CAKE_FLOW_NAT_FLAG) |
				(nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
					CAKE_FLOW_MASK));
	if (tb[TCA_CAKE_ATM])
		q->atm_mode = nla_get_u32(tb[TCA_CAKE_ATM]);

	if (tb[TCA_CAKE_OVERHEAD]) {
		q->rate_overhead = nla_get_s32(tb[TCA_CAKE_OVERHEAD]);
		q->rate_flags |= CAKE_FLAG_OVERHEAD;
	}

	if (tb[TCA_CAKE_RAW]) {
		q->rate_flags &= ~CAKE_FLAG_OVERHEAD;
	}

	if (tb[TCA_CAKE_MPU])
		q->rate_mpu = nla_get_u32(tb[TCA_CAKE_MPU]);
	if (tb[TCA_CAKE_RTT]) {
		q->interval = nla_get_u32(tb[TCA_CAKE_RTT]);
		if (!q->interval)
			q->interval = 1;
	}

	if (tb[TCA_CAKE_TARGET]) {
		q->target = nla_get_u32(tb[TCA_CAKE_TARGET]);
		if (!q->target)
			q->target = 1;
	}

	if (tb[TCA_CAKE_AUTORATE]) {
		if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE]))
			q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
		else
			q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
	}

	if (tb[TCA_CAKE_INGRESS]) {
		if (!!nla_get_u32(tb[TCA_CAKE_INGRESS]))
			q->rate_flags |= CAKE_FLAG_INGRESS;
		else
			q->rate_flags &= ~CAKE_FLAG_INGRESS;
	}
	if (tb[TCA_CAKE_ACK_FILTER])
		q->ack_filter = nla_get_u32(tb[TCA_CAKE_ACK_FILTER]);

	if (tb[TCA_CAKE_MEMORY])
		q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);

	if (tb[TCA_CAKE_SPLIT_GSO]) {
		if (!!nla_get_u32(tb[TCA_CAKE_SPLIT_GSO]))
			q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
		else
			q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
	}

	if (tb[TCA_CAKE_FWMARK]) {
		q->fwmark_mask = nla_get_u32(tb[TCA_CAKE_FWMARK]);
		q->fwmark_shft = q->fwmark_mask ? __ffs(q->fwmark_mask) : 0;
	}

	if (q->tins) {
		sch_tree_lock(sch);
		cake_reconfigure(sch);
		sch_tree_unlock(sch);
	}

	return 0;
}
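/* Illustrative note (added; not in the original source): fwmark_shft is the
 * bit position of the lowest set bit in fwmark_mask, so the tin-selection
 * code earlier in this file can turn (skb->mark & fwmark_mask) >> fwmark_shft
 * into a small 1-based tin index.  E.g. a mask of 0x00f0 gives
 * fwmark_shft = __ffs(0x00f0) = 4, and a packet marked 0x30 then yields
 * (0x30 & 0xf0) >> 4 = 3, selecting the third tin.
 */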
static void cake_destroy(struct Qdisc *sch)
{
	struct cake_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	tcf_block_put(q->block);
	kvfree(q->tins);
}
static int cake_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	int i, j, err;

	sch->limit = 10240;
	q->tin_mode = CAKE_DIFFSERV_DIFFSERV3;
	q->flow_mode = CAKE_FLOW_TRIPLE;
	q->rate_bps = 0; /* unlimited by default */
	q->interval = 100000; /* 100ms default */
	q->target = 5000; /* 5ms: codel RFC argues
			   * for 5 to 10% of interval
			   */
	q->rate_flags |= CAKE_FLAG_SPLIT_GSO;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (opt) {
		int err = cake_change(sch, opt, extack);

		if (err)
			return err;
	}
	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;
	quantum_div[0] = ~0;
	for (i = 1; i <= CAKE_QUEUES; i++)
		quantum_div[i] = 65535 / i;
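	/* Worked example (added for illustration): quantum_div[] is a
	 * reciprocal table, quantum_div[i] = 65535 / i, used by the flow/host
	 * fairness logic elsewhere in this file to divide a quantum by the
	 * number of active flows without a per-packet division:
	 * (quantum * quantum_div[i]) >> 16 is approximately quantum / i.
	 * E.g. with a 1514-byte flow quantum and three active flows,
	 * (1514 * 21845) >> 16 = 504, versus 1514 / 3 = 504.7.
	 */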
	q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data),
			   GFP_KERNEL);
	if (!q->tins)
		goto nomem;

	for (i = 0; i < CAKE_MAX_TINS; i++) {
		struct cake_tin_data *b = q->tins + i;

		INIT_LIST_HEAD(&b->new_flows);
		INIT_LIST_HEAD(&b->old_flows);
		INIT_LIST_HEAD(&b->decaying_flows);
		b->sparse_flow_count = 0;
		b->bulk_flow_count = 0;
		b->decaying_flow_count = 0;

		for (j = 0; j < CAKE_QUEUES; j++) {
			struct cake_flow *flow = b->flows + j;
			u32 k = j * CAKE_MAX_TINS + i;

			INIT_LIST_HEAD(&flow->flowchain);
			cobalt_vars_init(&flow->cvars);

			q->overflow_heap[k].t = i;
			q->overflow_heap[k].b = j;
			b->overflow_idx[j] = k;
		}
	}

	cake_reconfigure(sch);
	q->avg_peak_bandwidth = q->rate_bps;
	q->min_netlen = ~0;
	q->min_adjlen = ~0;
	return 0;

nomem:
	cake_destroy(sch);
	return -ENOMEM;
}
static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!opts)
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64, q->rate_bps,
			      TCA_CAKE_PAD))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE,
			q->flow_mode & CAKE_FLOW_MASK))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_RTT, q->interval))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_TARGET, q->target))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_AUTORATE,
			!!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_INGRESS,
			!!(q->rate_flags & CAKE_FLAG_INGRESS)))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_NAT,
			!!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, q->tin_mode))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_WASH,
			!!(q->rate_flags & CAKE_FLAG_WASH)))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, q->rate_overhead))
		goto nla_put_failure;

	if (!(q->rate_flags & CAKE_FLAG_OVERHEAD))
		if (nla_put_u32(skb, TCA_CAKE_RAW, 0))
			goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_ATM, q->atm_mode))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_MPU, q->rate_mpu))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_SPLIT_GSO,
			!!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_FWMARK, q->fwmark_mask))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}
static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct nlattr *stats = nla_nest_start_noflag(d->skb, TCA_STATS_APP);
	struct cake_sched_data *q = qdisc_priv(sch);
	struct nlattr *tstats, *ts;
	int i;

	if (!stats)
		return -1;

#define PUT_STAT_U32(attr, data) do {				       \
		if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
			goto nla_put_failure;			       \
	} while (0)
#define PUT_STAT_U64(attr, data) do {				       \
		if (nla_put_u64_64bit(d->skb, TCA_CAKE_STATS_ ## attr, \
					data, TCA_CAKE_STATS_PAD)) \
			goto nla_put_failure;			       \
	} while (0)
	PUT_STAT_U64(CAPACITY_ESTIMATE64, q->avg_peak_bandwidth);
	PUT_STAT_U32(MEMORY_LIMIT, q->buffer_limit);
	PUT_STAT_U32(MEMORY_USED, q->buffer_max_used);
	PUT_STAT_U32(AVG_NETOFF, ((q->avg_netoff + 0x8000) >> 16));
	PUT_STAT_U32(MAX_NETLEN, q->max_netlen);
	PUT_STAT_U32(MAX_ADJLEN, q->max_adjlen);
	PUT_STAT_U32(MIN_NETLEN, q->min_netlen);
	PUT_STAT_U32(MIN_ADJLEN, q->min_adjlen);

#undef PUT_STAT_U32
#undef PUT_STAT_U64
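	/* Note added for illustration: the AVG_NETOFF figure above is kept in
	 * 16.16 fixed point; adding 0x8000 (one half in that format) before
	 * the right shift by 16 rounds to the nearest integer rather than
	 * truncating.
	 */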
	tstats = nla_nest_start_noflag(d->skb, TCA_CAKE_STATS_TIN_STATS);
	if (!tstats)
		goto nla_put_failure;

#define PUT_TSTAT_U32(attr, data) do {					\
		if (nla_put_u32(d->skb, TCA_CAKE_TIN_STATS_ ## attr, data)) \
			goto nla_put_failure;				\
	} while (0)
#define PUT_TSTAT_U64(attr, data) do {					\
		if (nla_put_u64_64bit(d->skb, TCA_CAKE_TIN_STATS_ ## attr, \
					data, TCA_CAKE_TIN_STATS_PAD))	\
			goto nla_put_failure;				\
	} while (0)
	for (i = 0; i < q->tin_cnt; i++) {
		struct cake_tin_data *b = &q->tins[q->tin_order[i]];

		ts = nla_nest_start_noflag(d->skb, i + 1);
		if (!ts)
			goto nla_put_failure;

		PUT_TSTAT_U64(THRESHOLD_RATE64, b->tin_rate_bps);
		PUT_TSTAT_U64(SENT_BYTES64, b->bytes);
		PUT_TSTAT_U32(BACKLOG_BYTES, b->tin_backlog);

		PUT_TSTAT_U32(TARGET_US,
			      ktime_to_us(ns_to_ktime(b->cparams.target)));
		PUT_TSTAT_U32(INTERVAL_US,
			      ktime_to_us(ns_to_ktime(b->cparams.interval)));

		PUT_TSTAT_U32(SENT_PACKETS, b->packets);
		PUT_TSTAT_U32(DROPPED_PACKETS, b->tin_dropped);
		PUT_TSTAT_U32(ECN_MARKED_PACKETS, b->tin_ecn_mark);
		PUT_TSTAT_U32(ACKS_DROPPED_PACKETS, b->ack_drops);

		PUT_TSTAT_U32(PEAK_DELAY_US,
			      ktime_to_us(ns_to_ktime(b->peak_delay)));
		PUT_TSTAT_U32(AVG_DELAY_US,
			      ktime_to_us(ns_to_ktime(b->avge_delay)));
		PUT_TSTAT_U32(BASE_DELAY_US,
			      ktime_to_us(ns_to_ktime(b->base_delay)));

		PUT_TSTAT_U32(WAY_INDIRECT_HITS, b->way_hits);
		PUT_TSTAT_U32(WAY_MISSES, b->way_misses);
		PUT_TSTAT_U32(WAY_COLLISIONS, b->way_collisions);

		PUT_TSTAT_U32(SPARSE_FLOWS, b->sparse_flow_count +
					    b->decaying_flow_count);
		PUT_TSTAT_U32(BULK_FLOWS, b->bulk_flow_count);
		PUT_TSTAT_U32(UNRESPONSIVE_FLOWS, b->unresponsive_flow_count);
		PUT_TSTAT_U32(MAX_SKBLEN, b->max_skblen);

		PUT_TSTAT_U32(FLOW_QUANTUM, b->flow_quantum);
		nla_nest_end(d->skb, ts);
	}
#undef PUT_TSTAT_U32
#undef PUT_TSTAT_U64

	nla_nest_end(d->skb, tstats);
	return nla_nest_end(d->skb, stats);

nla_put_failure:
	nla_nest_cancel(d->skb, stats);
	return -1;
}
static struct Qdisc *cake_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long cake_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long cake_bind(struct Qdisc *sch, unsigned long parent,
			       u32 classid)
{
	return 0;
}

static void cake_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *cake_tcf_block(struct Qdisc *sch, unsigned long cl,
					struct netlink_ext_ack *extack)
{
	struct cake_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}
static int cake_dump_class(struct Qdisc *sch, unsigned long cl,
			   struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}
static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				 struct gnet_dump *d)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	const struct cake_flow *flow = NULL;
	struct gnet_stats_queue qs = { 0 };
	struct nlattr *stats;
	u32 idx = cl - 1;

	if (idx < CAKE_QUEUES * q->tin_cnt) {
		const struct cake_tin_data *b = \
			&q->tins[q->tin_order[idx / CAKE_QUEUES]];
		const struct sk_buff *skb;

		flow = &b->flows[idx % CAKE_QUEUES];
		if (flow->head) {
			sch_tree_lock(sch);
			skb = flow->head;
			while (skb) {
				qs.qlen++;
				skb = skb->next;
			}
			sch_tree_unlock(sch);
		}

		qs.backlog = b->backlogs[idx % CAKE_QUEUES];
		qs.drops = flow->dropped;
	}

	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	if (flow) {
		ktime_t now = ktime_get();

		stats = nla_nest_start_noflag(d->skb, TCA_STATS_APP);
		if (!stats)
			return -1;

#define PUT_STAT_U32(attr, data) do {				       \
		if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
			goto nla_put_failure;			       \
	} while (0)
#define PUT_STAT_S32(attr, data) do {				       \
		if (nla_put_s32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
			goto nla_put_failure;			       \
	} while (0)
		PUT_STAT_S32(DEFICIT, flow->deficit);
		PUT_STAT_U32(DROPPING, flow->cvars.dropping);
		PUT_STAT_U32(COBALT_COUNT, flow->cvars.count);
		PUT_STAT_U32(P_DROP, flow->cvars.p_drop);
		if (flow->cvars.p_drop) {
			PUT_STAT_S32(BLUE_TIMER_US,
				     ktime_to_us(
					     ktime_sub(now,
						       flow->cvars.blue_timer)));
		}
		if (flow->cvars.dropping) {
			PUT_STAT_S32(DROP_NEXT_US,
				     ktime_to_us(
					     ktime_sub(now,
						       flow->cvars.drop_next)));
		}

		if (nla_nest_end(d->skb, stats) < 0)
			return -1;
	}

	return 0;

nla_put_failure:
	nla_nest_cancel(d->skb, stats);
	return -1;
}
static void cake_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	unsigned int i, j;

	if (arg->stop)
		return;

	for (i = 0; i < q->tin_cnt; i++) {
		struct cake_tin_data *b = &q->tins[q->tin_order[i]];

		for (j = 0; j < CAKE_QUEUES; j++) {
			if (list_empty(&b->flows[j].flowchain) ||
			    arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, i * CAKE_QUEUES + j + 1, arg) < 0) {
				arg->stop = 1;
				break;
			}
			arg->count++;
		}
	}
}
static const struct Qdisc_class_ops cake_class_ops = {
	.leaf		=	cake_leaf,
	.find		=	cake_find,
	.tcf_block	=	cake_tcf_block,
	.bind_tcf	=	cake_bind,
	.unbind_tcf	=	cake_unbind,
	.dump		=	cake_dump_class,
	.dump_stats	=	cake_dump_class_stats,
	.walk		=	cake_walk,
};
static struct Qdisc_ops cake_qdisc_ops __read_mostly = {
	.cl_ops		=	&cake_class_ops,
	.id		=	"cake",
	.priv_size	=	sizeof(struct cake_sched_data),
	.enqueue	=	cake_enqueue,
	.dequeue	=	cake_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	cake_init,
	.reset		=	cake_reset,
	.destroy	=	cake_destroy,
	.change		=	cake_change,
	.dump		=	cake_dump,
	.dump_stats	=	cake_dump_stats,
	.owner		=	THIS_MODULE,
};
static int __init cake_module_init(void)
{
	return register_qdisc(&cake_qdisc_ops);
}

static void __exit cake_module_exit(void)
{
	unregister_qdisc(&cake_qdisc_ops);
}

module_init(cake_module_init)
module_exit(cake_module_exit)
MODULE_AUTHOR("Jonathan Morton");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("The CAKE shaper.");
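/* Example use (added for illustration; the option keywords are those provided
 * by the iproute2 "tc" front-end for this qdisc, not defined in this file):
 *
 *   tc qdisc replace dev eth0 root cake bandwidth 20Mbit diffserv4 nat
 *   tc -s qdisc show dev eth0
 *
 * The first command installs CAKE as the root qdisc with a 20 Mbit/s shaper,
 * the four-tin Diffserv mapping set up by cake_config_diffserv4() above, and
 * NAT-aware flow hashing; the second dumps the per-tin statistics emitted by
 * cake_dump_stats().
 */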