Merge tag 'omap-for-v5.8/dt-missed-signed' of git://git.kernel.org/pub/scm/linux...
[linux-2.6-microblaze.git] / net / sched / act_gate.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Copyright 2020 NXP */
3
4 #include <linux/module.h>
5 #include <linux/types.h>
6 #include <linux/kernel.h>
7 #include <linux/string.h>
8 #include <linux/errno.h>
9 #include <linux/skbuff.h>
10 #include <linux/rtnetlink.h>
11 #include <linux/init.h>
12 #include <linux/slab.h>
13 #include <net/act_api.h>
14 #include <net/netlink.h>
15 #include <net/pkt_cls.h>
16 #include <net/tc_act/tc_gate.h>
17
18 static unsigned int gate_net_id;
19 static struct tc_action_ops act_gate_ops;
20
21 static ktime_t gate_get_time(struct tcf_gate *gact)
22 {
23         ktime_t mono = ktime_get();
24
25         switch (gact->tk_offset) {
26         case TK_OFFS_MAX:
27                 return mono;
28         default:
29                 return ktime_mono_to_any(mono, gact->tk_offset);
30         }
31
32         return KTIME_MAX;
33 }
34
35 static int gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
36 {
37         struct tcf_gate_params *param = &gact->param;
38         ktime_t now, base, cycle;
39         u64 n;
40
41         base = ns_to_ktime(param->tcfg_basetime);
42         now = gate_get_time(gact);
43
44         if (ktime_after(base, now)) {
45                 *start = base;
46                 return 0;
47         }
48
49         cycle = param->tcfg_cycletime;
50
51         /* cycle time should not be zero */
52         if (!cycle)
53                 return -EFAULT;
54
55         n = div64_u64(ktime_sub_ns(now, base), cycle);
56         *start = ktime_add_ns(base, (n + 1) * cycle);
57         return 0;
58 }
59
60 static void gate_start_timer(struct tcf_gate *gact, ktime_t start)
61 {
62         ktime_t expires;
63
64         expires = hrtimer_get_expires(&gact->hitimer);
65         if (expires == 0)
66                 expires = KTIME_MAX;
67
68         start = min_t(ktime_t, start, expires);
69
70         hrtimer_start(&gact->hitimer, start, HRTIMER_MODE_ABS_SOFT);
71 }
72
/* Cycle timer callback: advance the schedule to the next gate entry.
 *
 * Runs in softirq context (HRTIMER_MODE_ABS_SOFT).  Under tcf_lock it
 * applies the pending entry's state (open/closed, max octets), advances
 * current_close_time by that entry's interval, and selects the entry
 * that follows (wrapping to the list head at the end of a cycle).
 */
static enum hrtimer_restart gate_timer_func(struct hrtimer *timer)
{
	struct tcf_gate *gact = container_of(timer, struct tcf_gate,
					     hitimer);
	struct tcf_gate_params *p = &gact->param;
	struct tcfg_gate_entry *next;
	ktime_t close_time, now;

	spin_lock(&gact->tcf_lock);

	next = gact->next_entry;

	/* cycle start, clear pending bit, clear total octets */
	gact->current_gate_status = next->gate_state ? GATE_ACT_GATE_OPEN : 0;
	gact->current_entry_octets = 0;
	gact->current_max_octets = next->maxoctets;

	gact->current_close_time = ktime_add_ns(gact->current_close_time,
						next->interval);

	close_time = gact->current_close_time;

	/* Pick the entry after "next", wrapping at the end of the list. */
	if (list_is_last(&next->list, &p->entries))
		next = list_first_entry(&p->entries,
					struct tcfg_gate_entry, list);
	else
		next = list_next_entry(next, list);

	now = gate_get_time(gact);

	if (ktime_after(now, close_time)) {
		ktime_t cycle, base;
		u64 n;

		/* The timer fired past the computed close time;
		 * resynchronize to the next cycle boundary after "now".
		 */
		cycle = p->tcfg_cycletime;
		base = ns_to_ktime(p->tcfg_basetime);
		n = div64_u64(ktime_sub_ns(now, base), cycle);
		close_time = ktime_add_ns(base, (n + 1) * cycle);
	}

	gact->next_entry = next;

	hrtimer_set_expires(&gact->hitimer, close_time);

	spin_unlock(&gact->tcf_lock);

	/* Keep the timer running with the expiry set above. */
	return HRTIMER_RESTART;
}
121
122 static int tcf_gate_act(struct sk_buff *skb, const struct tc_action *a,
123                         struct tcf_result *res)
124 {
125         struct tcf_gate *gact = to_gate(a);
126
127         spin_lock(&gact->tcf_lock);
128
129         tcf_lastuse_update(&gact->tcf_tm);
130         bstats_update(&gact->tcf_bstats, skb);
131
132         if (unlikely(gact->current_gate_status & GATE_ACT_PENDING)) {
133                 spin_unlock(&gact->tcf_lock);
134                 return gact->tcf_action;
135         }
136
137         if (!(gact->current_gate_status & GATE_ACT_GATE_OPEN))
138                 goto drop;
139
140         if (gact->current_max_octets >= 0) {
141                 gact->current_entry_octets += qdisc_pkt_len(skb);
142                 if (gact->current_entry_octets > gact->current_max_octets) {
143                         gact->tcf_qstats.overlimits++;
144                         goto drop;
145                 }
146         }
147
148         spin_unlock(&gact->tcf_lock);
149
150         return gact->tcf_action;
151 drop:
152         gact->tcf_qstats.drops++;
153         spin_unlock(&gact->tcf_lock);
154
155         return TC_ACT_SHOT;
156 }
157
/* Netlink policy for one schedule entry (nested in TCA_GATE_ONE_ENTRY). */
static const struct nla_policy entry_policy[TCA_GATE_ENTRY_MAX + 1] = {
	[TCA_GATE_ENTRY_INDEX]          = { .type = NLA_U32 },
	[TCA_GATE_ENTRY_GATE]           = { .type = NLA_FLAG },
	[TCA_GATE_ENTRY_INTERVAL]       = { .type = NLA_U32 },
	[TCA_GATE_ENTRY_IPV]            = { .type = NLA_S32 },
	[TCA_GATE_ENTRY_MAX_OCTETS]     = { .type = NLA_S32 },
};
165
/* Netlink policy for the top-level gate action attributes. */
static const struct nla_policy gate_policy[TCA_GATE_MAX + 1] = {
	[TCA_GATE_PARMS]                = { .len = sizeof(struct tc_gate),
					    .type = NLA_EXACT_LEN },
	[TCA_GATE_PRIORITY]             = { .type = NLA_S32 },
	[TCA_GATE_ENTRY_LIST]           = { .type = NLA_NESTED },
	[TCA_GATE_BASE_TIME]            = { .type = NLA_U64 },
	[TCA_GATE_CYCLE_TIME]           = { .type = NLA_U64 },
	[TCA_GATE_CYCLE_TIME_EXT]       = { .type = NLA_U64 },
	[TCA_GATE_FLAGS]                = { .type = NLA_U32 },
	[TCA_GATE_CLOCKID]              = { .type = NLA_S32 },
};
177
178 static int fill_gate_entry(struct nlattr **tb, struct tcfg_gate_entry *entry,
179                            struct netlink_ext_ack *extack)
180 {
181         u32 interval = 0;
182
183         entry->gate_state = nla_get_flag(tb[TCA_GATE_ENTRY_GATE]);
184
185         if (tb[TCA_GATE_ENTRY_INTERVAL])
186                 interval = nla_get_u32(tb[TCA_GATE_ENTRY_INTERVAL]);
187
188         if (interval == 0) {
189                 NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
190                 return -EINVAL;
191         }
192
193         entry->interval = interval;
194
195         if (tb[TCA_GATE_ENTRY_IPV])
196                 entry->ipv = nla_get_s32(tb[TCA_GATE_ENTRY_IPV]);
197         else
198                 entry->ipv = -1;
199
200         if (tb[TCA_GATE_ENTRY_MAX_OCTETS])
201                 entry->maxoctets = nla_get_s32(tb[TCA_GATE_ENTRY_MAX_OCTETS]);
202         else
203                 entry->maxoctets = -1;
204
205         return 0;
206 }
207
208 static int parse_gate_entry(struct nlattr *n, struct  tcfg_gate_entry *entry,
209                             int index, struct netlink_ext_ack *extack)
210 {
211         struct nlattr *tb[TCA_GATE_ENTRY_MAX + 1] = { };
212         int err;
213
214         err = nla_parse_nested(tb, TCA_GATE_ENTRY_MAX, n, entry_policy, extack);
215         if (err < 0) {
216                 NL_SET_ERR_MSG(extack, "Could not parse nested entry");
217                 return -EINVAL;
218         }
219
220         entry->index = index;
221
222         return fill_gate_entry(tb, entry, extack);
223 }
224
225 static void release_entry_list(struct list_head *entries)
226 {
227         struct tcfg_gate_entry *entry, *e;
228
229         list_for_each_entry_safe(entry, e, entries, list) {
230                 list_del(&entry->list);
231                 kfree(entry);
232         }
233 }
234
/* Parse the TCA_GATE_ENTRY_LIST attribute into @sched->entries.
 *
 * Attributes that are not TCA_GATE_ONE_ENTRY are skipped (with an
 * extack message) rather than treated as fatal.  Entries are allocated
 * with GFP_ATOMIC because the caller holds the action's spinlock.
 * Returns the number of entries parsed, or a negative errno; on failure
 * the partially-built list is released.
 */
static int parse_gate_list(struct nlattr *list_attr,
			   struct tcf_gate_params *sched,
			   struct netlink_ext_ack *extack)
{
	struct tcfg_gate_entry *entry;
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list_attr)
		return -EINVAL;

	nla_for_each_nested(n, list_attr, rem) {
		if (nla_type(n) != TCA_GATE_ONE_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute isn't type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			err = -ENOMEM;
			goto release_list;
		}

		err = parse_gate_entry(n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			goto release_list;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;

release_list:
	release_entry_list(&sched->entries);

	return err;
}
279
280 static int tcf_gate_init(struct net *net, struct nlattr *nla,
281                          struct nlattr *est, struct tc_action **a,
282                          int ovr, int bind, bool rtnl_held,
283                          struct tcf_proto *tp, u32 flags,
284                          struct netlink_ext_ack *extack)
285 {
286         struct tc_action_net *tn = net_generic(net, gate_net_id);
287         enum tk_offsets tk_offset = TK_OFFS_TAI;
288         struct nlattr *tb[TCA_GATE_MAX + 1];
289         struct tcf_chain *goto_ch = NULL;
290         struct tcf_gate_params *p;
291         s32 clockid = CLOCK_TAI;
292         struct tcf_gate *gact;
293         struct tc_gate *parm;
294         int ret = 0, err;
295         u64 basetime = 0;
296         u32 gflags = 0;
297         s32 prio = -1;
298         ktime_t start;
299         u32 index;
300
301         if (!nla)
302                 return -EINVAL;
303
304         err = nla_parse_nested(tb, TCA_GATE_MAX, nla, gate_policy, extack);
305         if (err < 0)
306                 return err;
307
308         if (!tb[TCA_GATE_PARMS])
309                 return -EINVAL;
310
311         parm = nla_data(tb[TCA_GATE_PARMS]);
312         index = parm->index;
313
314         err = tcf_idr_check_alloc(tn, &index, a, bind);
315         if (err < 0)
316                 return err;
317
318         if (err && bind)
319                 return 0;
320
321         if (!err) {
322                 ret = tcf_idr_create(tn, index, est, a,
323                                      &act_gate_ops, bind, false, 0);
324                 if (ret) {
325                         tcf_idr_cleanup(tn, index);
326                         return ret;
327                 }
328
329                 ret = ACT_P_CREATED;
330         } else if (!ovr) {
331                 tcf_idr_release(*a, bind);
332                 return -EEXIST;
333         }
334         if (ret == ACT_P_CREATED) {
335                 to_gate(*a)->param.tcfg_clockid = -1;
336                 INIT_LIST_HEAD(&(to_gate(*a)->param.entries));
337         }
338
339         if (tb[TCA_GATE_PRIORITY])
340                 prio = nla_get_s32(tb[TCA_GATE_PRIORITY]);
341
342         if (tb[TCA_GATE_BASE_TIME])
343                 basetime = nla_get_u64(tb[TCA_GATE_BASE_TIME]);
344
345         if (tb[TCA_GATE_FLAGS])
346                 gflags = nla_get_u32(tb[TCA_GATE_FLAGS]);
347
348         if (tb[TCA_GATE_CLOCKID]) {
349                 clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
350                 switch (clockid) {
351                 case CLOCK_REALTIME:
352                         tk_offset = TK_OFFS_REAL;
353                         break;
354                 case CLOCK_MONOTONIC:
355                         tk_offset = TK_OFFS_MAX;
356                         break;
357                 case CLOCK_BOOTTIME:
358                         tk_offset = TK_OFFS_BOOT;
359                         break;
360                 case CLOCK_TAI:
361                         tk_offset = TK_OFFS_TAI;
362                         break;
363                 default:
364                         NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
365                         goto release_idr;
366                 }
367         }
368
369         err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
370         if (err < 0)
371                 goto release_idr;
372
373         gact = to_gate(*a);
374
375         spin_lock_bh(&gact->tcf_lock);
376         p = &gact->param;
377
378         if (tb[TCA_GATE_CYCLE_TIME]) {
379                 p->tcfg_cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);
380                 if (!p->tcfg_cycletime_ext)
381                         goto chain_put;
382         }
383
384         if (tb[TCA_GATE_ENTRY_LIST]) {
385                 err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
386                 if (err < 0)
387                         goto chain_put;
388         }
389
390         if (!p->tcfg_cycletime) {
391                 struct tcfg_gate_entry *entry;
392                 ktime_t cycle = 0;
393
394                 list_for_each_entry(entry, &p->entries, list)
395                         cycle = ktime_add_ns(cycle, entry->interval);
396                 p->tcfg_cycletime = cycle;
397         }
398
399         if (tb[TCA_GATE_CYCLE_TIME_EXT])
400                 p->tcfg_cycletime_ext =
401                         nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);
402
403         p->tcfg_priority = prio;
404         p->tcfg_basetime = basetime;
405         p->tcfg_clockid = clockid;
406         p->tcfg_flags = gflags;
407
408         gact->tk_offset = tk_offset;
409         hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
410         gact->hitimer.function = gate_timer_func;
411
412         err = gate_get_start_time(gact, &start);
413         if (err < 0) {
414                 NL_SET_ERR_MSG(extack,
415                                "Internal error: failed get start time");
416                 release_entry_list(&p->entries);
417                 goto chain_put;
418         }
419
420         gact->current_close_time = start;
421         gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING;
422
423         gact->next_entry = list_first_entry(&p->entries,
424                                             struct tcfg_gate_entry, list);
425
426         goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
427
428         gate_start_timer(gact, start);
429
430         spin_unlock_bh(&gact->tcf_lock);
431
432         if (goto_ch)
433                 tcf_chain_put_by_act(goto_ch);
434
435         if (ret == ACT_P_CREATED)
436                 tcf_idr_insert(tn, *a);
437
438         return ret;
439
440 chain_put:
441         spin_unlock_bh(&gact->tcf_lock);
442
443         if (goto_ch)
444                 tcf_chain_put_by_act(goto_ch);
445 release_idr:
446         tcf_idr_release(*a, bind);
447         return err;
448 }
449
450 static void tcf_gate_cleanup(struct tc_action *a)
451 {
452         struct tcf_gate *gact = to_gate(a);
453         struct tcf_gate_params *p;
454
455         p = &gact->param;
456         if (p->tcfg_clockid != -1)
457                 hrtimer_cancel(&gact->hitimer);
458
459         release_entry_list(&p->entries);
460 }
461
/* Emit one schedule entry as a nested TCA_GATE_ONE_ENTRY attribute.
 *
 * Returns the non-negative result of nla_nest_end() on success,
 * -ENOSPC when the nest cannot be started, or -1 if any attribute does
 * not fit (the partial nest is cancelled).
 */
static int dumping_entry(struct sk_buff *skb,
			 struct tcfg_gate_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start_noflag(skb, TCA_GATE_ONE_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(skb, TCA_GATE_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	/* The gate state is encoded as a flag: present == open. */
	if (entry->gate_state && nla_put_flag(skb, TCA_GATE_ENTRY_GATE))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GATE_ENTRY_INTERVAL, entry->interval))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_ENTRY_MAX_OCTETS, entry->maxoctets))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_ENTRY_IPV, entry->ipv))
		goto nla_put_failure;

	return nla_nest_end(skb, item);

nla_put_failure:
	nla_nest_cancel(skb, item);
	return -1;
}
492
/* ->dump() hook: serialize the action's configuration and schedule.
 *
 * Returns skb->len on success or -1 on overflow, with the message
 * trimmed back to its original tail pointer.
 */
static int tcf_gate_dump(struct sk_buff *skb, struct tc_action *a,
			 int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_gate *gact = to_gate(a);
	struct tc_gate opt = {
		.index    = gact->tcf_index,
		.refcnt   = refcount_read(&gact->tcf_refcnt) - ref,
		.bindcnt  = atomic_read(&gact->tcf_bindcnt) - bind,
	};
	struct tcfg_gate_entry *entry;
	struct tcf_gate_params *p;
	struct nlattr *entry_list;
	struct tcf_t t;

	/* Hold tcf_lock so the dumped parameters and entry list form a
	 * consistent snapshot.
	 */
	spin_lock_bh(&gact->tcf_lock);
	opt.action = gact->tcf_action;

	p = &gact->param;

	if (nla_put(skb, TCA_GATE_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_BASE_TIME,
			      p->tcfg_basetime, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME,
			      p->tcfg_cycletime, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME_EXT,
			      p->tcfg_cycletime_ext, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_CLOCKID, p->tcfg_clockid))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GATE_FLAGS, p->tcfg_flags))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_PRIORITY, p->tcfg_priority))
		goto nla_put_failure;

	entry_list = nla_nest_start_noflag(skb, TCA_GATE_ENTRY_LIST);
	if (!entry_list)
		goto nla_put_failure;

	list_for_each_entry(entry, &p->entries, list) {
		if (dumping_entry(skb, entry) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, entry_list);

	tcf_tm_dump(&t, &gact->tcf_tm);
	if (nla_put_64bit(skb, TCA_GATE_TM, sizeof(t), &t, TCA_GATE_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&gact->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&gact->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}
560
561 static int tcf_gate_walker(struct net *net, struct sk_buff *skb,
562                            struct netlink_callback *cb, int type,
563                            const struct tc_action_ops *ops,
564                            struct netlink_ext_ack *extack)
565 {
566         struct tc_action_net *tn = net_generic(net, gate_net_id);
567
568         return tcf_generic_walker(tn, skb, cb, type, ops, extack);
569 }
570
571 static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u32 packets,
572                                   u64 lastuse, bool hw)
573 {
574         struct tcf_gate *gact = to_gate(a);
575         struct tcf_t *tm = &gact->tcf_tm;
576
577         tcf_action_update_stats(a, bytes, packets, false, hw);
578         tm->lastuse = max_t(u64, tm->lastuse, lastuse);
579 }
580
581 static int tcf_gate_search(struct net *net, struct tc_action **a, u32 index)
582 {
583         struct tc_action_net *tn = net_generic(net, gate_net_id);
584
585         return tcf_idr_search(tn, a, index);
586 }
587
/* ->get_fill_size() hook: size estimate for TCA_GATE_PARMS only; the
 * entry list and remaining attributes are not accounted for here.
 */
static size_t tcf_gate_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_gate));
}
592
/* Ops table wiring the gate action into the tc action framework. */
static struct tc_action_ops act_gate_ops = {
	.kind           =       "gate",
	.id             =       TCA_ID_GATE,
	.owner          =       THIS_MODULE,
	.act            =       tcf_gate_act,
	.dump           =       tcf_gate_dump,
	.init           =       tcf_gate_init,
	.cleanup        =       tcf_gate_cleanup,
	.walk           =       tcf_gate_walker,
	.stats_update   =       tcf_gate_stats_update,
	.get_fill_size  =       tcf_gate_get_fill_size,
	.lookup         =       tcf_gate_search,
	.size           =       sizeof(struct tcf_gate),
};
607
608 static __net_init int gate_init_net(struct net *net)
609 {
610         struct tc_action_net *tn = net_generic(net, gate_net_id);
611
612         return tc_action_net_init(net, tn, &act_gate_ops);
613 }
614
/* Per-netns teardown: release all gate actions in the exiting netns. */
static void __net_exit gate_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, gate_net_id);
}
619
/* Pernet ops: allocate one struct tc_action_net per netns, keyed by
 * gate_net_id.
 */
static struct pernet_operations gate_net_ops = {
	.init = gate_init_net,
	.exit_batch = gate_exit_net,
	.id   = &gate_net_id,
	.size = sizeof(struct tc_action_net),
};
626
/* Module entry: register the gate action and its pernet state. */
static int __init gate_init_module(void)
{
	return tcf_register_action(&act_gate_ops, &gate_net_ops);
}
631
/* Module exit: unregister the gate action and its pernet state. */
static void __exit gate_cleanup_module(void)
{
	tcf_unregister_action(&act_gate_ops, &gate_net_ops);
}
636
/* Module registration boilerplate. */
module_init(gate_init_module);
module_exit(gate_cleanup_module);
MODULE_LICENSE("GPL v2");