/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>

struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;
	int i;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);
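
/* Usage sketch (illustrative, not part of this file's build): a caller that
 * builds a rule with a single action could do the following. The surrounding
 * call site is hypothetical; flow_rule_alloc() and FLOW_ACTION_DROP are real.
 *
 *	struct flow_rule *rule;
 *
 *	rule = flow_rule_alloc(1);
 *	if (!rule)
 *		return -ENOMEM;
 *	rule->action.entries[0].id = FLOW_ACTION_DROP;
 *	(hw_stats already defaults to FLOW_ACTION_HW_STATS_DONT_CARE)
 */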

#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask);	\

void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);
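
/* Usage sketch (illustrative): a driver's flow_cls_offload handler usually
 * checks for a key with flow_rule_match_key() and then extracts it with one
 * of the helpers in this file. flow_cls_offload_flow_rule(),
 * flow_rule_match_key() and struct flow_match_basic are part of the API; the
 * handler itself is hypothetical.
 *
 *	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
 *
 *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *		struct flow_match_basic match;
 *
 *		flow_rule_match_basic(rule, &match);
 *		(match.key->n_proto / match.mask->n_proto now hold the
 *		 masked EtherType requested by the classifier)
 *	}
 */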

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);

struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp)
{
	struct flow_action_cookie *cookie;

	cookie = kmalloc(sizeof(*cookie) + len, gfp);
	if (!cookie)
		return NULL;
	cookie->cookie_len = len;
	memcpy(cookie->cookie, data, len);
	return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);

void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
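
/* Usage sketch (illustrative): a driver with a single block callback can wire
 * its ndo_setup_tc() straight to flow_block_cb_setup_simple(). The foo_*
 * names are hypothetical; TC_SETUP_BLOCK and netdev_priv() are real.
 *
 *	static LIST_HEAD(foo_block_cb_list);
 *
 *	static int foo_setup_tc(struct net_device *dev, enum tc_setup_type type,
 *				void *type_data)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		switch (type) {
 *		case TC_SETUP_BLOCK:
 *			return flow_block_cb_setup_simple(type_data,
 *							  &foo_block_cb_list,
 *							  foo_setup_tc_block_cb,
 *							  priv, priv, true);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */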

static DEFINE_MUTEX(flow_indr_block_lock);
static LIST_HEAD(flow_block_indr_list);
static LIST_HEAD(flow_block_indr_dev_list);

struct flow_indr_dev {
	struct list_head		list;
	flow_indr_block_bind_cb_t	*cb;
	void				*cb_priv;
	refcount_t			refcnt;
};

static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
						 void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	indr_dev->cb = cb;
	indr_dev->cb_priv = cb_priv;
	refcount_set(&indr_dev->refcnt, 1);
	return indr_dev;
}

int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
		if (indr_dev->cb == cb &&
		    indr_dev->cb_priv == cb_priv) {
			refcount_inc(&indr_dev->refcnt);
			mutex_unlock(&flow_indr_block_lock);
			return 0;
		}
	}

	indr_dev = flow_indr_dev_alloc(cb, cb_priv);
	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return -ENOMEM;
	}

	list_add(&indr_dev->list, &flow_block_indr_dev_list);
	mutex_unlock(&flow_indr_block_lock);

	return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);
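
/* Usage sketch (illustrative): drivers that offload on behalf of other
 * devices (e.g. tunnel netdevs) register one flow_indr_block_bind_cb_t,
 * typically at probe/init time, and unregister it with a matching release
 * callback on teardown. The foo_* symbols are hypothetical.
 *
 *	err = flow_indr_dev_register(foo_indr_setup_cb, foo_priv);
 *	if (err)
 *		goto err_out;
 *	...
 *	flow_indr_dev_unregister(foo_indr_setup_cb, foo_priv,
 *				 foo_indr_release);
 */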

static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
				      void *cb_priv,
				      struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
		if (this->release == release &&
		    this->indr.cb_priv == cb_priv)
			list_move(&this->indr.list, cleanup_list);
	}
}

static void flow_block_indr_notify(struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
		list_del(&this->indr.list);
		this->indr.cleanup(this);
	}
}

void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      void (*release)(void *cb_priv))
{
	struct flow_indr_dev *this, *next, *indr_dev = NULL;
	LIST_HEAD(cleanup_list);

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
		if (this->cb == cb &&
		    this->cb_priv == cb_priv &&
		    refcount_dec_and_test(&this->refcnt)) {
			indr_dev = this;
			list_del(&indr_dev->list);
			break;
		}
	}

	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return;
	}

	__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
	mutex_unlock(&flow_indr_block_lock);

	flow_block_indr_notify(&cleanup_list);
	kfree(indr_dev);
}
EXPORT_SYMBOL(flow_indr_dev_unregister);

static void flow_block_indr_init(struct flow_block_cb *flow_block,
				 struct flow_block_offload *bo,
				 struct net_device *dev, void *data,
				 void *cb_priv,
				 void (*cleanup)(struct flow_block_cb *block_cb))
{
	flow_block->indr.binder_type = bo->binder_type;
	flow_block->indr.data = data;
	flow_block->indr.cb_priv = cb_priv;
	flow_block->indr.dev = dev;
	flow_block->indr.cleanup = cleanup;
}

struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
					       void *cb_ident, void *cb_priv,
					       void (*release)(void *cb_priv),
					       struct flow_block_offload *bo,
					       struct net_device *dev, void *data,
					       void *indr_cb_priv,
					       void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
	if (IS_ERR(block_cb))
		goto out;

	flow_block_indr_init(block_cb, bo, dev, data, indr_cb_priv, cleanup);
	list_add(&block_cb->indr.list, &flow_block_indr_list);

out:
	return block_cb;
}
EXPORT_SYMBOL(flow_indr_block_cb_alloc);
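
/* Usage sketch (illustrative): inside its indirect bind callback, a driver
 * allocates the indirect block_cb and attaches it to the offload request on
 * FLOW_BLOCK_BIND. flow_block_cb_add() is real; the foo_* names are
 * hypothetical.
 *
 *	block_cb = flow_indr_block_cb_alloc(foo_block_cb, foo_priv, foo_priv,
 *					    foo_block_release, bo, dev, data,
 *					    indr_priv, cleanup);
 *	if (IS_ERR(block_cb))
 *		return PTR_ERR(block_cb);
 *
 *	flow_block_cb_add(block_cb, bo);
 *	list_add_tail(&block_cb->driver_list, &foo_block_cb_list);
 */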

int flow_indr_dev_setup_offload(struct net_device *dev,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_indr_dev *this;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(this, &flow_block_indr_dev_list, list)
		this->cb(dev, this->cb_priv, type, bo, data, cleanup);
	mutex_unlock(&flow_indr_block_lock);

	return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);