Merge tag 'f2fs-for-v5.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeu...
[linux-2.6-microblaze.git] / net / sched / sch_dsmark.c
1 /* net/sched/sch_dsmark.c - Differentiated Services field marker */
2
3 /* Written 1998-2000 by Werner Almesberger, EPFL ICA */
4
5
6 #include <linux/module.h>
7 #include <linux/init.h>
8 #include <linux/slab.h>
9 #include <linux/types.h>
10 #include <linux/string.h>
11 #include <linux/errno.h>
12 #include <linux/skbuff.h>
13 #include <linux/rtnetlink.h>
14 #include <linux/bitops.h>
15 #include <net/pkt_sched.h>
16 #include <net/pkt_cls.h>
17 #include <net/dsfield.h>
18 #include <net/inet_ecn.h>
19 #include <asm/byteorder.h>
20
21 /*
22  * classid      class           marking
23  * -------      -----           -------
24  *   n/a          0             n/a
25  *   x:0          1             use entry [0]
26  *   ...         ...            ...
27  *   x:y y>0     y+1            use entry [y]
28  *   ...         ...            ...
29  * x:indices-1  indices         use entry [indices-1]
30  *   ...         ...            ...
31  *   x:y         y+1            use entry [y & (indices-1)]
32  *   ...         ...            ...
33  * 0xffff       0x10000         use entry [indices-1]
34  */
35
36
37 #define NO_DEFAULT_INDEX        (1 << 16)
38
/*
 * One per-class DS-field rewrite rule.  On dequeue the field becomes
 * (old & mask) | value (applied via ipv4/ipv6_change_dsfield()).
 * The identity mapping is mask == 0xff, value == 0.
 */
struct mask_value {
	u8			mask;
	u8			value;
};
43
struct dsmark_qdisc_data {
	struct Qdisc		*q;		/* inner (child) qdisc */
	struct tcf_proto __rcu	*filter_list;	/* classifier chain */
	struct tcf_block	*block;		/* filter block backing filter_list */
	struct mask_value	*mv;		/* one rewrite rule per class index */
	u16			indices;	/* table size; always a power of two */
	u8			set_tc_index;	/* copy DS field into skb->tc_index on enqueue */
	u32			default_index;	/* index range is 0...0xffff */
#define DSMARK_EMBEDDED_SZ	16
	/* small tables live inline so init avoids a kmalloc */
	struct mask_value	embedded[DSMARK_EMBEDDED_SZ];
};
55
56 static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
57 {
58         return index <= p->indices && index > 0;
59 }
60
61 /* ------------------------- Class/flow operations ------------------------- */
62
/*
 * Replace the inner qdisc.  If no replacement is given, fall back to a
 * fresh default pfifo (or noop_qdisc if even that cannot be allocated),
 * so p->q is never left NULL by grafting.
 */
static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p],new %p,old %p)\n",
		 __func__, sch, p, new, old);

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					sch->handle, NULL);
		if (new == NULL)
			new = &noop_qdisc;
	}

	/* qdisc_replace() swaps p->q under the tree lock and returns the old one */
	*old = qdisc_replace(sch, new, &p->q);
	return 0;
}
82
83 static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
84 {
85         struct dsmark_qdisc_data *p = qdisc_priv(sch);
86         return p->q;
87 }
88
89 static unsigned long dsmark_find(struct Qdisc *sch, u32 classid)
90 {
91         return TC_H_MIN(classid) + 1;
92 }
93
94 static unsigned long dsmark_bind_filter(struct Qdisc *sch,
95                                         unsigned long parent, u32 classid)
96 {
97         pr_debug("%s(sch %p,[qdisc %p],classid %x)\n",
98                  __func__, sch, qdisc_priv(sch), classid);
99
100         return dsmark_find(sch, classid);
101 }
102
/* Classes are not reference-counted, so unbinding requires no work. */
static void dsmark_unbind_filter(struct Qdisc *sch, unsigned long cl)
{
}
106
/*
 * Netlink attribute policy.  Note: the policy only validates types; it
 * does not make any attribute mandatory, so consumers must check presence.
 */
static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
	[TCA_DSMARK_INDICES]		= { .type = NLA_U16 },
	[TCA_DSMARK_DEFAULT_INDEX]	= { .type = NLA_U16 },
	[TCA_DSMARK_SET_TC_INDEX]	= { .type = NLA_FLAG },
	[TCA_DSMARK_MASK]		= { .type = NLA_U8 },
	[TCA_DSMARK_VALUE]		= { .type = NLA_U8 },
};
114
/*
 * Update the mask/value rewrite rule of an existing class.  *arg is the
 * class handle (table index + 1) previously resolved by dsmark_find().
 * Returns -ENOENT for an out-of-range handle and -EINVAL for missing or
 * malformed options.
 */
static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
			 struct nlattr **tca, unsigned long *arg,
			 struct netlink_ext_ack *extack)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	int err = -EINVAL;

	pr_debug("%s(sch %p,[qdisc %p],classid %x,parent %x), arg 0x%lx\n",
		 __func__, sch, p, classid, parent, *arg);

	if (!dsmark_valid_index(p, *arg)) {
		err = -ENOENT;
		goto errout;
	}

	if (!opt)
		goto errout;

	err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
					  dsmark_policy, NULL);
	if (err < 0)
		goto errout;

	/* Attributes are optional; only the ones supplied are updated. */
	if (tb[TCA_DSMARK_VALUE])
		p->mv[*arg - 1].value = nla_get_u8(tb[TCA_DSMARK_VALUE]);

	if (tb[TCA_DSMARK_MASK])
		p->mv[*arg - 1].mask = nla_get_u8(tb[TCA_DSMARK_MASK]);

	err = 0;

errout:
	return err;
}
151
152 static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
153 {
154         struct dsmark_qdisc_data *p = qdisc_priv(sch);
155
156         if (!dsmark_valid_index(p, arg))
157                 return -EINVAL;
158
159         p->mv[arg - 1].mask = 0xff;
160         p->mv[arg - 1].value = 0;
161
162         return 0;
163 }
164
/*
 * Walk all configured classes.  Entries still at the identity mapping
 * (mask 0xff, value 0) are skipped, but note they still advance
 * walker->count so skip/count offsets stay stable across dumps.
 */
static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int i;

	pr_debug("%s(sch %p,[qdisc %p],walker %p)\n",
		 __func__, sch, p, walker);

	if (walker->stop)
		return;

	for (i = 0; i < p->indices; i++) {
		if (p->mv[i].mask == 0xff && !p->mv[i].value)
			goto ignore;
		if (walker->count >= walker->skip) {
			/* class handles are index + 1 */
			if (walker->fn(sch, i + 1, walker) < 0) {
				walker->stop = 1;
				break;
			}
		}
ignore:
		walker->count++;
	}
}
189
190 static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
191                                           struct netlink_ext_ack *extack)
192 {
193         struct dsmark_qdisc_data *p = qdisc_priv(sch);
194
195         return p->block;
196 }
197
198 /* --------------------------- Qdisc operations ---------------------------- */
199
/*
 * Enqueue: optionally seed skb->tc_index from the packet's DS field,
 * classify the packet to (re)set tc_index, then hand it to the inner
 * qdisc.  The actual DS-field rewrite happens at dequeue time.
 */
static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	/* Cache the length now; skb may be freed by the inner qdisc. */
	unsigned int len = qdisc_pkt_len(skb);
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int err;

	pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);

	if (p->set_tc_index) {
		int wlen = skb_network_offset(skb);

		switch (tc_skb_protocol(skb)) {
		case htons(ETH_P_IP):
			wlen += sizeof(struct iphdr);
			/* header must be present and writable before reading TOS */
			if (!pskb_may_pull(skb, wlen) ||
			    skb_try_make_writable(skb, wlen))
				goto drop;

			/* keep DSCP, strip the ECN bits */
			skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
				& ~INET_ECN_MASK;
			break;

		case htons(ETH_P_IPV6):
			wlen += sizeof(struct ipv6hdr);
			if (!pskb_may_pull(skb, wlen) ||
			    skb_try_make_writable(skb, wlen))
				goto drop;

			skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
				& ~INET_ECN_MASK;
			break;
		default:
			skb->tc_index = 0;
			break;
		}
	}

	/* skb->priority addressed at this qdisc overrides classification */
	if (TC_H_MAJ(skb->priority) == sch->handle)
		skb->tc_index = TC_H_MIN(skb->priority);
	else {
		struct tcf_result res;
		struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
		int result = tcf_classify(skb, fl, &res, false);

		pr_debug("result %d class 0x%04x\n", result, res.classid);

		switch (result) {
#ifdef CONFIG_NET_CLS_ACT
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			__qdisc_drop(skb, to_free);
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;

		case TC_ACT_SHOT:
			goto drop;
#endif
		case TC_ACT_OK:
			skb->tc_index = TC_H_MIN(res.classid);
			break;

		default:
			if (p->default_index != NO_DEFAULT_INDEX)
				skb->tc_index = p->default_index;
			break;
		}
	}

	err = qdisc_enqueue(skb, p->q, to_free);
	if (err != NET_XMIT_SUCCESS) {
		/* congestion-notification returns are not counted as drops */
		if (net_xmit_drop_count(err))
			qdisc_qstats_drop(sch);
		return err;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;

drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
285
286 static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
287 {
288         struct dsmark_qdisc_data *p = qdisc_priv(sch);
289         struct sk_buff *skb;
290         u32 index;
291
292         pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
293
294         skb = qdisc_dequeue_peeked(p->q);
295         if (skb == NULL)
296                 return NULL;
297
298         qdisc_bstats_update(sch, skb);
299         qdisc_qstats_backlog_dec(sch, skb);
300         sch->q.qlen--;
301
302         index = skb->tc_index & (p->indices - 1);
303         pr_debug("index %d->%d\n", skb->tc_index, index);
304
305         switch (tc_skb_protocol(skb)) {
306         case htons(ETH_P_IP):
307                 ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
308                                     p->mv[index].value);
309                         break;
310         case htons(ETH_P_IPV6):
311                 ipv6_change_dsfield(ipv6_hdr(skb), p->mv[index].mask,
312                                     p->mv[index].value);
313                         break;
314         default:
315                 /*
316                  * Only complain if a change was actually attempted.
317                  * This way, we can send non-IP traffic through dsmark
318                  * and don't need yet another qdisc as a bypass.
319                  */
320                 if (p->mv[index].mask != 0xff || p->mv[index].value)
321                         pr_warn("%s: unsupported protocol %d\n",
322                                 __func__, ntohs(tc_skb_protocol(skb)));
323                 break;
324         }
325
326         return skb;
327 }
328
329 static struct sk_buff *dsmark_peek(struct Qdisc *sch)
330 {
331         struct dsmark_qdisc_data *p = qdisc_priv(sch);
332
333         pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
334
335         return p->q->ops->peek(p->q);
336 }
337
338 static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
339                        struct netlink_ext_ack *extack)
340 {
341         struct dsmark_qdisc_data *p = qdisc_priv(sch);
342         struct nlattr *tb[TCA_DSMARK_MAX + 1];
343         int err = -EINVAL;
344         u32 default_index = NO_DEFAULT_INDEX;
345         u16 indices;
346         int i;
347
348         pr_debug("%s(sch %p,[qdisc %p],opt %p)\n", __func__, sch, p, opt);
349
350         if (!opt)
351                 goto errout;
352
353         err = tcf_block_get(&p->block, &p->filter_list, sch, extack);
354         if (err)
355                 return err;
356
357         err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
358                                           dsmark_policy, NULL);
359         if (err < 0)
360                 goto errout;
361
362         err = -EINVAL;
363         indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);
364
365         if (hweight32(indices) != 1)
366                 goto errout;
367
368         if (tb[TCA_DSMARK_DEFAULT_INDEX])
369                 default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]);
370
371         if (indices <= DSMARK_EMBEDDED_SZ)
372                 p->mv = p->embedded;
373         else
374                 p->mv = kmalloc_array(indices, sizeof(*p->mv), GFP_KERNEL);
375         if (!p->mv) {
376                 err = -ENOMEM;
377                 goto errout;
378         }
379         for (i = 0; i < indices; i++) {
380                 p->mv[i].mask = 0xff;
381                 p->mv[i].value = 0;
382         }
383         p->indices = indices;
384         p->default_index = default_index;
385         p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);
386
387         p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle,
388                                  NULL);
389         if (p->q == NULL)
390                 p->q = &noop_qdisc;
391         else
392                 qdisc_hash_add(p->q, true);
393
394         pr_debug("%s: qdisc %p\n", __func__, p->q);
395
396         err = 0;
397 errout:
398         return err;
399 }
400
401 static void dsmark_reset(struct Qdisc *sch)
402 {
403         struct dsmark_qdisc_data *p = qdisc_priv(sch);
404
405         pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
406         qdisc_reset(p->q);
407         sch->qstats.backlog = 0;
408         sch->q.qlen = 0;
409 }
410
/*
 * Release everything init acquired: the filter block, the child qdisc,
 * and the mask/value table (only if it was kmalloc'ed rather than the
 * embedded array).  Also runs after a failed init, so the helpers must
 * tolerate NULL fields (tcf_block_put/qdisc_put handle NULL).
 */
static void dsmark_destroy(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

	tcf_block_put(p->block);
	qdisc_put(p->q);
	if (p->mv != p->embedded)
		kfree(p->mv);
}
422
/*
 * Dump one class: its handle (minor = raw table index, i.e. cl - 1) and
 * its mask/value pair nested under TCA_OPTIONS.
 */
static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opts = NULL;

	pr_debug("%s(sch %p,[qdisc %p],class %ld\n", __func__, sch, p, cl);

	if (!dsmark_valid_index(p, cl))
		return -EINVAL;

	/* class handles are index + 1; expose the raw index as the minor */
	tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
	tcm->tcm_info = p->q->handle;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mv[cl - 1].mask) ||
	    nla_put_u8(skb, TCA_DSMARK_VALUE, p->mv[cl - 1].value))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
450
451 static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
452 {
453         struct dsmark_qdisc_data *p = qdisc_priv(sch);
454         struct nlattr *opts = NULL;
455
456         opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
457         if (opts == NULL)
458                 goto nla_put_failure;
459         if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
460                 goto nla_put_failure;
461
462         if (p->default_index != NO_DEFAULT_INDEX &&
463             nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
464                 goto nla_put_failure;
465
466         if (p->set_tc_index &&
467             nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
468                 goto nla_put_failure;
469
470         return nla_nest_end(skb, opts);
471
472 nla_put_failure:
473         nla_nest_cancel(skb, opts);
474         return -EMSGSIZE;
475 }
476
/* Per-class operations exported to the tc core. */
static const struct Qdisc_class_ops dsmark_class_ops = {
	.graft		=	dsmark_graft,
	.leaf		=	dsmark_leaf,
	.find		=	dsmark_find,
	.change		=	dsmark_change,
	.delete		=	dsmark_delete,
	.walk		=	dsmark_walk,
	.tcf_block	=	dsmark_tcf_block,
	.bind_tcf	=	dsmark_bind_filter,
	.unbind_tcf	=	dsmark_unbind_filter,
	.dump		=	dsmark_dump_class,
};
489
/* Qdisc operations registered with the tc core. */
static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&dsmark_class_ops,
	.id		=	"dsmark",
	.priv_size	=	sizeof(struct dsmark_qdisc_data),
	.enqueue	=	dsmark_enqueue,
	.dequeue	=	dsmark_dequeue,
	.peek		=	dsmark_peek,
	.init		=	dsmark_init,
	.reset		=	dsmark_reset,
	.destroy	=	dsmark_destroy,
	.change		=	NULL,
	.dump		=	dsmark_dump,
	.owner		=	THIS_MODULE,
};
505
/* Module entry point: register the dsmark qdisc with the tc core. */
static int __init dsmark_module_init(void)
{
	return register_qdisc(&dsmark_qdisc_ops);
}
510
/* Module exit point: unregister the qdisc. */
static void __exit dsmark_module_exit(void)
{
	unregister_qdisc(&dsmark_qdisc_ops);
}
515
module_init(dsmark_module_init)
module_exit(dsmark_module_exit)

MODULE_LICENSE("GPL");