/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);
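
/*
 * Usage sketch (illustrative only, not part of this file): a block
 * owner allocates a rule sized for its actions and frees it with
 * kfree() once the driver callback has consumed it.
 *
 *	struct flow_rule *rule = flow_rule_alloc(1);
 *
 *	if (!rule)
 *		return -ENOMEM;
 *	rule->action.entries[0].id = FLOW_ACTION_DROP;
 *	... hand the rule to the driver via its setup callback ...
 *	kfree(rule);
 */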
#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask);

void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);
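
/*
 * Usage sketch (hedged; "foo_parse" and "foo_set_proto" are
 * hypothetical driver names): a driver's rule-parsing routine checks
 * flow_rule_match_key() for a dissector key and then extracts the
 * key/mask pair with the matching helper.
 *
 *	static void foo_parse(struct flow_rule *rule)
 *	{
 *		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *			struct flow_match_basic match;
 *
 *			flow_rule_match_basic(rule, &match);
 *			if (match.mask->n_proto)
 *				foo_set_proto(match.key->n_proto);
 *		}
 *	}
 */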

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);

struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp)
{
	struct flow_action_cookie *cookie;

	cookie = kmalloc(sizeof(*cookie) + len, gfp);
	if (!cookie)
		return NULL;

	cookie->cookie_len = len;
	memcpy(cookie->cookie, data, len);

	return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);
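
/*
 * Usage sketch (hedged; "foo_entry" is a hypothetical destination, not
 * a verbatim caller): duplicating an action cookie for a driver and
 * releasing it when the offloaded action goes away.
 *
 *	foo_entry->cookie = flow_action_cookie_create(data, len, GFP_KERNEL);
 *	if (!foo_entry->cookie)
 *		return -ENOMEM;
 *	...
 *	flow_action_cookie_destroy(foo_entry->cookie);
 */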

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);
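
/*
 * Usage sketch (hypothetical "foo" driver symbols): passing a release
 * hook at allocation time ties the private state's lifetime to the
 * block_cb, so flow_block_cb_free() tears both down together.
 *
 *	block_cb = flow_block_cb_alloc(foo_setup_cb, foo, priv,
 *				       foo_block_release);
 *	if (IS_ERR(block_cb))
 *		return PTR_ERR(block_cb);
 *	...
 *	flow_block_cb_free(block_cb);
 */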

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);
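
/*
 * Usage sketch (hedged; "foo" names are hypothetical): a driver that
 * shares one block_cb across several binder points allocates it only
 * on the first bind and frees it when the last reference drops.
 *
 *	block_cb = flow_block_cb_lookup(f->block, foo_setup_cb, foo);
 *	if (block_cb)
 *		flow_block_cb_incref(block_cb);
 *	else
 *		block_cb = flow_block_cb_alloc(foo_setup_cb, foo, foo, NULL);
 *	...
 *	if (!flow_block_cb_decref(block_cb)) {
 *		flow_block_cb_remove(block_cb, f);
 *		flow_block_cb_free(block_cb);
 *	}
 */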

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
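
/*
 * Usage sketch (illustrative; "foo_block_cb_list", "foo_setup_tc" and
 * "foo_setup_tc_block_cb" are hypothetical driver symbols): a driver
 * with no per-block state can route the TC_SETUP_BLOCK case of its
 * ndo_setup_tc() straight through this helper.
 *
 *	static LIST_HEAD(foo_block_cb_list);
 *
 *	static int foo_setup_tc(struct net_device *dev,
 *				enum tc_setup_type type, void *type_data)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		switch (type) {
 *		case TC_SETUP_BLOCK:
 *			return flow_block_cb_setup_simple(type_data,
 *							  &foo_block_cb_list,
 *							  foo_setup_tc_block_cb,
 *							  priv, priv, true);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */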

static LIST_HEAD(block_cb_list);

static struct rhashtable indr_setup_block_ht;

/* One entry per (cb, cb_ident) pair registered on an indirect device. */
struct flow_indr_block_cb {
	struct list_head list;
	void *cb_priv;
	flow_indr_block_bind_cb_t *cb;
	void *cb_ident;
};

/* Per-netdev state for devices the registering driver does not own,
 * e.g. tunnel netdevs; refcounted by the callbacks attached to it.
 */
struct flow_indr_block_dev {
	struct rhash_head ht_node;
	struct net_device *dev;
	unsigned int refcnt;
	struct list_head cb_list;
};

static const struct rhashtable_params flow_indr_setup_block_ht_params = {
	.key_offset	= offsetof(struct flow_indr_block_dev, dev),
	.head_offset	= offsetof(struct flow_indr_block_dev, ht_node),
	.key_len	= sizeof(struct net_device *),
};

static struct flow_indr_block_dev *
flow_indr_block_dev_lookup(struct net_device *dev)
{
	return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
				      flow_indr_setup_block_ht_params);
}

static struct flow_indr_block_dev *
flow_indr_block_dev_get(struct net_device *dev)
{
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (indr_dev)
		goto inc_ref;

	indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	INIT_LIST_HEAD(&indr_dev->cb_list);
	indr_dev->dev = dev;
	if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
				   flow_indr_setup_block_ht_params)) {
		kfree(indr_dev);
		return NULL;
	}

inc_ref:
	indr_dev->refcnt++;
	return indr_dev;
}

static void flow_indr_block_dev_put(struct flow_indr_block_dev *indr_dev)
{
	if (--indr_dev->refcnt)
		return;

	rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
			       flow_indr_setup_block_ht_params);
	kfree(indr_dev);
}

static struct flow_indr_block_cb *
flow_indr_block_cb_lookup(struct flow_indr_block_dev *indr_dev,
			  flow_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		if (indr_block_cb->cb == cb &&
		    indr_block_cb->cb_ident == cb_ident)
			return indr_block_cb;
	return NULL;
}

static struct flow_indr_block_cb *
flow_indr_block_cb_add(struct flow_indr_block_dev *indr_dev, void *cb_priv,
		       flow_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;

	indr_block_cb = flow_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (indr_block_cb)
		return ERR_PTR(-EEXIST);

	indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
	if (!indr_block_cb)
		return ERR_PTR(-ENOMEM);

	indr_block_cb->cb_priv = cb_priv;
	indr_block_cb->cb = cb;
	indr_block_cb->cb_ident = cb_ident;
	list_add(&indr_block_cb->list, &indr_dev->cb_list);

	return indr_block_cb;
}

static void flow_indr_block_cb_del(struct flow_indr_block_cb *indr_block_cb)
{
	list_del(&indr_block_cb->list);
	kfree(indr_block_cb);
}

static DEFINE_MUTEX(flow_indr_block_cb_lock);

static void flow_block_cmd(struct net_device *dev,
			   flow_indr_block_bind_cb_t *cb, void *cb_priv,
			   enum flow_block_command command)
{
	struct flow_indr_block_entry *entry;

	mutex_lock(&flow_indr_block_cb_lock);
	list_for_each_entry(entry, &block_cb_list, list) {
		entry->cb(dev, cb, cb_priv, command);
	}
	mutex_unlock(&flow_indr_block_cb_lock);
}

int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				  flow_indr_block_bind_cb_t *cb,
				  void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;
	int err;

	indr_dev = flow_indr_block_dev_get(dev);
	if (!indr_dev)
		return -ENOMEM;

	indr_block_cb = flow_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
	err = PTR_ERR_OR_ZERO(indr_block_cb);
	if (err)
		goto err_dev_put;

	flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
		       FLOW_BLOCK_BIND);

	return 0;

err_dev_put:
	flow_indr_block_dev_put(indr_dev);
	return err;
}
EXPORT_SYMBOL_GPL(__flow_indr_block_cb_register);

int flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				flow_indr_block_bind_cb_t *cb,
				void *cb_ident)
{
	int err;

	rtnl_lock();
	err = __flow_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(flow_indr_block_cb_register);

void __flow_indr_block_cb_unregister(struct net_device *dev,
				     flow_indr_block_bind_cb_t *cb,
				     void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	indr_block_cb = flow_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (!indr_block_cb)
		return;

	flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
		       FLOW_BLOCK_UNBIND);

	flow_indr_block_cb_del(indr_block_cb);
	flow_indr_block_dev_put(indr_dev);
}
EXPORT_SYMBOL_GPL(__flow_indr_block_cb_unregister);

void flow_indr_block_cb_unregister(struct net_device *dev,
				   flow_indr_block_bind_cb_t *cb,
				   void *cb_ident)
{
	rtnl_lock();
	__flow_indr_block_cb_unregister(dev, cb, cb_ident);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(flow_indr_block_cb_unregister);
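
/*
 * Usage sketch (hedged; "foo" names are hypothetical): a driver that
 * offloads rules on tunnel devices it does not own registers an
 * indirect callback per such netdev, and unregisters it when the
 * netdev goes away (e.g. on NETDEV_UNREGISTER).
 *
 *	err = flow_indr_block_cb_register(netdev, foo_priv,
 *					  foo_indr_setup_cb, foo_priv);
 *	...
 *	flow_indr_block_cb_unregister(netdev, foo_indr_setup_cb, foo_priv);
 */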

void flow_indr_block_call(struct net_device *dev,
			  struct flow_block_offload *bo,
			  enum flow_block_command command)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
				  bo);
}
EXPORT_SYMBOL_GPL(flow_indr_block_call);
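
/*
 * Usage sketch (illustrative): a block owner such as a classifier
 * subsystem calls this when (un)binding an offloadable block on a
 * device, so indirectly registered drivers observe the same
 * bind/unbind events as directly attached ones.
 *
 *	bo.command = command;
 *	bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
 *	flow_indr_block_call(dev, &bo, command);
 */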

void flow_indr_add_block_cb(struct flow_indr_block_entry *entry)
{
	mutex_lock(&flow_indr_block_cb_lock);
	list_add_tail(&entry->list, &block_cb_list);
	mutex_unlock(&flow_indr_block_cb_lock);
}
EXPORT_SYMBOL_GPL(flow_indr_add_block_cb);

void flow_indr_del_block_cb(struct flow_indr_block_entry *entry)
{
	mutex_lock(&flow_indr_block_cb_lock);
	list_del(&entry->list);
	mutex_unlock(&flow_indr_block_cb_lock);
}
EXPORT_SYMBOL_GPL(flow_indr_del_block_cb);
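
/*
 * Usage sketch (hedged; "foo_indr_block_get_and_cmd" is hypothetical):
 * a subsystem exposes its bind handler by embedding a
 * flow_indr_block_entry and adding it at init time, removing it again
 * on module exit.
 *
 *	static struct flow_indr_block_entry foo_entry = {
 *		.cb	= foo_indr_block_get_and_cmd,
 *		.list	= LIST_HEAD_INIT(foo_entry.list),
 *	};
 *
 *	flow_indr_add_block_cb(&foo_entry);
 *	...
 *	flow_indr_del_block_cb(&foo_entry);
 */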

static int __init init_flow_indr_rhashtable(void)
{
	return rhashtable_init(&indr_setup_block_ht,
			       &flow_indr_setup_block_ht_params);
}
subsys_initcall(init_flow_indr_rhashtable);