// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

#define NFP_FLOWER_MERGE_FIELDS \
	(NFP_FLOWER_LAYER_PORT | \
	 NFP_FLOWER_LAYER_MAC | \
	 NFP_FLOWER_LAYER_TP | \
	 NFP_FLOWER_LAYER_IPV4 | \
	 NFP_FLOWER_LAYER_IPV6)
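
/* Note: these are the only match layers the flow-merge logic below knows how
 * to compare; nfp_flower_populate_merge_match() rejects a flow keyed on
 * anything outside this set unless extra fields are explicitly allowed.
 */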

struct nfp_flower_merge_check {
	union {
		struct {
			__be16 tci;
			struct nfp_flower_mac_mpls l2;
			struct nfp_flower_tp_ports l4;
			union {
				struct nfp_flower_ipv4 ipv4;
				struct nfp_flower_ipv6 ipv6;
			};
		};
		unsigned long vals[8];
	};
};
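
/* The anonymous union above lets the merge code view the same bytes two
 * ways: as structured match fields (tci, l2, l4, ipv4/ipv6) while they are
 * accumulated, and as a flat bitmap via vals[] so that bitmap operations
 * such as bitmap_andnot() can compare entire match spaces in one pass.
 */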

static int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
		     u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct sk_buff *skb;
	char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(app->ctrl, skb);

	return 0;
}
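
/* The resulting control message payload is laid out back to back:
 *
 *   +---------------+-----------+------------+-------------+
 *   | rule metadata | match key | match mask | action list |
 *   +---------------+-----------+------------+-------------+
 *     meta_len        key_len     mask_len      act_len
 *
 * with the lengths in the metadata converted to firmware long words
 * (NFP_FL_LW_SIZ) only for the duration of the copy.
 */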

static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);

	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static bool nfp_flower_check_higher_than_l3(struct tc_cls_flower_offload *f)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);

	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
			  u32 *key_layer_two, int *key_size,
			  struct netlink_ext_ack *extack)
{
	if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
		return -EOPNOTSUPP;
	}

	if (enc_opts->len > 0) {
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
		*key_size += sizeof(struct nfp_flower_geneve_options);
	}

	return 0;
}

static int
nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
			      struct flow_dissector_key_enc_opts *enc_op,
			      u32 *key_layer_two, u8 *key_layer, int *key_size,
			      struct nfp_flower_priv *priv,
			      enum nfp_flower_tun_type *tun_type,
			      struct netlink_ext_ack *extack)
{
	int err;

	switch (enc_ports->dst) {
	case htons(IANA_VXLAN_UDP_PORT):
		*tun_type = NFP_FL_TUNNEL_VXLAN;
		*key_layer |= NFP_FLOWER_LAYER_VXLAN;
		*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

		if (enc_op) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
			return -EOPNOTSUPP;
		}
		break;
	case htons(GENEVE_UDP_PORT):
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
			return -EOPNOTSUPP;
		}
		*tun_type = NFP_FL_TUNNEL_GENEVE;
		*key_layer |= NFP_FLOWER_LAYER_EXT_META;
		*key_size += sizeof(struct nfp_flower_ext_meta);
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
		*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

		if (!enc_op)
			break;

		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
			return -EOPNOTSUPP;
		}
		err = nfp_flower_calc_opt_layer(enc_op, key_layer_two,
						key_size, extack);
		if (err)
			return err;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
		return -EOPNOTSUPP;
	}

	return 0;
}
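
/* For reference, the two well-known destination ports matched above are
 * IANA_VXLAN_UDP_PORT (4789) and GENEVE_UDP_PORT (6081); any other UDP
 * tunnel port falls through to the unsupported-tunnel error path.
 */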

static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct net_device *netdev,
				struct nfp_fl_key_ls *ret_key_ls,
				struct tc_cls_flower_offload *flow,
				enum nfp_flower_tun_type *tun_type,
				struct netlink_ext_ack *extack)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic basic = { NULL, NULL};
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
	int err;

	if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
		return -EOPNOTSUPP;
	}

	/* If any tun dissector is used then the required set must be used. */
	if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
		return -EOPNOTSUPP;
	}

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan vlan;

		flow_rule_match_vlan(rule, &vlan);
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
		    vlan.key->vlan_priority) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
			return -EOPNOTSUPP;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_match_enc_opts enc_op = { NULL, NULL };
		struct flow_match_ipv4_addrs ipv4_addrs;
		struct flow_match_control enc_ctl;
		struct flow_match_ports enc_ports;

		flow_rule_match_enc_control(rule, &enc_ctl);

		if (enc_ctl.mask->addr_type != 0xffff) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
			return -EOPNOTSUPP;
		}
		if (enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only IPv4 tunnels are supported");
			return -EOPNOTSUPP;
		}

		/* These fields are already verified as used. */
		flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
		if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
			return -EOPNOTSUPP;
		}

		flow_rule_match_enc_ports(rule, &enc_ports);
		if (enc_ports.mask->dst != cpu_to_be16(~0)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
			return -EOPNOTSUPP;
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
			flow_rule_match_enc_opts(rule, &enc_op);

		err = nfp_flower_calc_udp_tun_layer(enc_ports.key, enc_op.key,
						    &key_layer_two, &key_layer,
						    &key_size, priv, tun_type,
						    extack);
		if (err)
			return err;

		/* Ensure the ingress netdev matches the expected tun type. */
		if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
			return -EOPNOTSUPP;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
		flow_rule_match_basic(rule, &basic);

	if (basic.mask && basic.mask->n_proto) {
		/* Ethernet type is present in the key. */
		switch (basic.key->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			/* Other ethtype - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_mac(flow)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: non IPv4/IPv6 offload with L3/L4 matches not supported");
				return -EOPNOTSUPP;
			}
			break;
		}
	}

	if (basic.mask && basic.mask->ip_proto) {
		switch (basic.key->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		default:
			/* Other ip proto - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_l3(flow)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unknown IP protocol with L4 matches not supported");
				return -EOPNOTSUPP;
			}
			break;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp tcp;
		u32 tcp_flags;

		flow_rule_match_tcp(rule, &tcp);
		tcp_flags = be16_to_cpu(tcp.key->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
			return -EOPNOTSUPP;
		}

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
			return -EOPNOTSUPP;
		}

		/* We need to store TCP flags in either the IPv4 or IPv6 key
		 * space, thus we need to ensure we include an IPv4/IPv6 key
		 * layer if we have not done so already.
		 */
		if (!basic.key) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
			return -EOPNOTSUPP;
		}

		if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
		    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
			switch (basic.key->n_proto) {
			case cpu_to_be16(ETH_P_IP):
				key_layer |= NFP_FLOWER_LAYER_IPV4;
				key_size += sizeof(struct nfp_flower_ipv4);
				break;

			case cpu_to_be16(ETH_P_IPV6):
				key_layer |= NFP_FLOWER_LAYER_IPV6;
				key_size += sizeof(struct nfp_flower_ipv6);
				break;

			default:
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control ctl;

		flow_rule_match_control(rule, &ctl);
		if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
			return -EOPNOTSUPP;
		}
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}
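
/* Worked example (illustrative): a filter matching on Ethernet addresses,
 * IPv4 and TCP ports would leave here with
 *
 *   key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC |
 *               NFP_FLOWER_LAYER_IPV4 | NFP_FLOWER_LAYER_TP
 *
 * and key_size equal to the sum of the meta_tci, in_port, mac_mpls, ipv4
 * and tp_ports structure sizes.
 */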

static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->meta.flags = 0;
	INIT_LIST_HEAD(&flow_pay->linked_flows);
	flow_pay->in_hw = false;

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

static int
nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
				     struct nfp_flower_merge_check *merge,
				     u8 *last_act_id, int *act_out)
{
	struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
	struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
	struct nfp_fl_set_ip4_addrs *ipv4_add;
	struct nfp_fl_set_ipv6_addr *ipv6_add;
	struct nfp_fl_push_vlan *push_vlan;
	struct nfp_fl_set_tport *tport;
	struct nfp_fl_set_eth *eth;
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;
	u8 act_id = 0;
	u8 *ports;
	int i;

	while (act_off < flow->meta.act_len) {
		a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_OUTPUT:
			if (act_out)
				(*act_out)++;
			break;
		case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
			push_vlan = (struct nfp_fl_push_vlan *)a;
			if (push_vlan->vlan_tci)
				merge->tci = cpu_to_be16(0xffff);
			break;
		case NFP_FL_ACTION_OPCODE_POP_VLAN:
			merge->tci = cpu_to_be16(0);
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL:
			/* New tunnel header means l2 to l4 can be matched. */
			eth_broadcast_addr(&merge->l2.mac_dst[0]);
			eth_broadcast_addr(&merge->l2.mac_src[0]);
			memset(&merge->l4, 0xff,
			       sizeof(struct nfp_flower_tp_ports));
			memset(&merge->ipv4, 0xff,
			       sizeof(struct nfp_flower_ipv4));
			break;
		case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
			eth = (struct nfp_fl_set_eth *)a;
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_src[i] |=
					eth->eth_addr_mask[ETH_ALEN + i];
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
			ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
			merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
			merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
			ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
			merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
			merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
			ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
			merge->ipv6.ip_ext.ttl |=
				ipv6_tc_hl_fl->ipv6_hop_limit_mask;
			merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
			merge->ipv6.ipv6_flow_label_exthdr |=
				ipv6_tc_hl_fl->ipv6_label_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_UDP:
		case NFP_FL_ACTION_OPCODE_SET_TCP:
			tport = (struct nfp_fl_set_tport *)a;
			ports = (u8 *)&merge->l4.port_src;
			for (i = 0; i < 4; i++)
				ports[i] |= tport->tp_port_mask[i];
			break;
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
		case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
			break;
		default:
			return -EOPNOTSUPP;
		}

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	if (last_act_id)
		*last_act_id = act_id;

	return 0;
}
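
/* The loop above widens the "known after sub_flow1" space by OR-ing each
 * set-action's mask into the merge state: a bit that sub_flow1 rewrites is
 * just as deterministic for the recirculated packet as a bit it matched on.
 */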

static int
nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
				struct nfp_flower_merge_check *merge,
				bool extra_fields)
{
	struct nfp_flower_meta_tci *meta_tci;
	u8 *mask = flow->mask_data;
	u8 key_layer, match_size;

	memset(merge, 0, sizeof(struct nfp_flower_merge_check));

	meta_tci = (struct nfp_flower_meta_tci *)mask;
	key_layer = meta_tci->nfp_flow_key_layer;

	if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
		return -EOPNOTSUPP;

	merge->tci = meta_tci->tci;
	mask += sizeof(struct nfp_flower_meta_tci);

	if (key_layer & NFP_FLOWER_LAYER_EXT_META)
		mask += sizeof(struct nfp_flower_ext_meta);

	mask += sizeof(struct nfp_flower_in_port);

	if (key_layer & NFP_FLOWER_LAYER_MAC) {
		match_size = sizeof(struct nfp_flower_mac_mpls);
		memcpy(&merge->l2, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_TP) {
		match_size = sizeof(struct nfp_flower_tp_ports);
		memcpy(&merge->l4, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV4) {
		match_size = sizeof(struct nfp_flower_ipv4);
		memcpy(&merge->ipv4, mask, match_size);
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV6) {
		match_size = sizeof(struct nfp_flower_ipv6);
		memcpy(&merge->ipv6, mask, match_size);
	}

	return 0;
}

static int
nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
		     struct nfp_fl_payload *sub_flow2)
{
	/* Two flows can be merged if sub_flow2 only matches on bits that are
	 * either matched by sub_flow1 or set by a sub_flow1 action. This
	 * ensures that every packet that hits sub_flow1 and recirculates is
	 * guaranteed to hit sub_flow2.
	 */
	struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
	int err, act_out = 0;
	u8 last_act_id = 0;

	err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
					      true);
	if (err)
		return err;

	err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
					      false);
	if (err)
		return err;

	err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
						   &last_act_id, &act_out);
	if (err)
		return err;

	/* Must only be 1 output action and it must be the last in sequence. */
	if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
		return -EOPNOTSUPP;

	/* Reject merge if sub_flow2 matches on something that is not matched
	 * on or set in an action by sub_flow1.
	 */
	err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
			    sub_flow1_merge.vals,
			    sizeof(struct nfp_flower_merge_check) * 8);
	if (err)
		return -EINVAL;

	return 0;
}
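
/* A minimal sketch of the bitmap check above, using hypothetical bit
 * positions: if sub_flow1 matches or sets bits {dst_mac, ipv4_dst} and
 * sub_flow2 matches only {ipv4_dst}, the andnot result is empty and the
 * merge is allowed; had sub_flow2 also matched {tcp_dport}, that bit would
 * survive the andnot (bitmap_andnot() returns non-zero when any bit
 * remains) and the merge would be rejected with -EINVAL.
 */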

static unsigned int
nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
			    bool *tunnel_act)
{
	unsigned int act_off = 0, act_len;
	struct nfp_fl_act_head *a;
	u8 act_id = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&act_src[act_off];
		act_len = a->len_lw << NFP_FL_LW_SIZ;
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
			if (tunnel_act)
				*tunnel_act = true;
			/* fall through */
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
			memcpy(act_dst + act_off, act_src + act_off, act_len);
			break;
		default:
			return act_off;
		}

		act_off += act_len;
	}

	return act_off;
}

static int nfp_fl_verify_post_tun_acts(char *acts, int len)
{
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&acts[act_off];
		if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
			return -EOPNOTSUPP;

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	return 0;
}

static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
			struct nfp_fl_payload *sub_flow2,
			struct nfp_fl_payload *merge_flow)
{
	unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
	bool tunnel_act = false;
	char *merge_act;
	int err;

	/* The last action of sub_flow1 must be output - do not merge this. */
	sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
	sub2_act_len = sub_flow2->meta.act_len;

	if (!sub2_act_len)
		return -EINVAL;

	if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
		return -EINVAL;

	/* A shortcut can only be applied if there is a single action. */
	if (sub1_act_len)
		merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
	else
		merge_flow->meta.shortcut = sub_flow2->meta.shortcut;

	merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
	merge_act = merge_flow->action_data;

	/* Copy any pre-actions to the start of merge flow action list. */
	pre_off1 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow1->action_data,
					       sub1_act_len, &tunnel_act);
	merge_act += pre_off1;
	sub1_act_len -= pre_off1;
	pre_off2 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow2->action_data,
					       sub2_act_len, NULL);
	merge_act += pre_off2;
	sub2_act_len -= pre_off2;

	/* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
	 * a tunnel, sub_flow 2 can only have output actions for a valid merge.
	 */
	if (tunnel_act) {
		char *post_tun_acts = &sub_flow2->action_data[pre_off2];

		err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len);
		if (err)
			return err;
	}

	/* Copy remaining actions from sub_flows 1 and 2. */
	memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
	merge_act += sub1_act_len;
	memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);

	return 0;
}
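
/* After a successful merge the combined action list is laid out as:
 *
 *   [pre-actions of sub_flow1][pre-actions of sub_flow2]
 *   [sub_flow1 actions minus its final output][all sub_flow2 actions]
 *
 * which preserves the firmware requirement that pre-actions (tunnel/LAG)
 * lead the list.
 */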

/* Flow link code should only be accessed under RTNL. */
static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
{
	list_del(&link->merge_flow.list);
	list_del(&link->sub_flow.list);
	kfree(link);
}

static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
				    struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
		if (link->sub_flow.flow == sub_flow) {
			nfp_flower_unlink_flow(link);
			return;
		}
}

static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
				 struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	link->merge_flow.flow = merge_flow;
	list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
	link->sub_flow.flow = sub_flow;
	list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);

	return 0;
}

/**
 * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows to single flow.
 * @app:	Pointer to the APP handle
 * @sub_flow1:	Initial flow matched to produce merge hint
 * @sub_flow2:	Post recirculation flow matched in merge hint
 *
 * Combines 2 flows (if valid) to a single flow, removing the initial from hw
 * and offloading the new, merged flow.
 *
 * Return: negative value on error, 0 on success.
 */
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
				     struct nfp_fl_payload *sub_flow1,
				     struct nfp_fl_payload *sub_flow2)
{
	struct tc_cls_flower_offload merge_tc_off;
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *merge_flow;
	struct nfp_fl_key_ls merge_key_ls;
	int err;

	ASSERT_RTNL();

	extack = merge_tc_off.common.extack;
	if (sub_flow1 == sub_flow2 ||
	    nfp_flower_is_merge_flow(sub_flow1) ||
	    nfp_flower_is_merge_flow(sub_flow2))
		return -EINVAL;

	err = nfp_flower_can_merge(sub_flow1, sub_flow2);
	if (err)
		return err;

	merge_key_ls.key_size = sub_flow1->meta.key_len;

	merge_flow = nfp_flower_allocate_new(&merge_key_ls);
	if (!merge_flow)
		return -ENOMEM;

	merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
	merge_flow->ingress_dev = sub_flow1->ingress_dev;

	memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
	       sub_flow1->meta.key_len);
	memcpy(merge_flow->mask_data, sub_flow1->mask_data,
	       sub_flow1->meta.mask_len);

	err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow1);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow2);
	if (err)
		goto err_unlink_sub_flow1;

	merge_tc_off.cookie = merge_flow->tc_flower_cookie;
	err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
					merge_flow->ingress_dev, extack);
	if (err)
		goto err_unlink_sub_flow2;

	err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto err_release_metadata;

	err = nfp_flower_xmit_flow(app, merge_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
	if (err)
		goto err_remove_rhash;

	merge_flow->in_hw = true;
	sub_flow1->in_hw = false;

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, merge_flow);
err_unlink_sub_flow2:
	nfp_flower_unlink_flows(merge_flow, sub_flow2);
err_unlink_sub_flow1:
	nfp_flower_unlink_flows(merge_flow, sub_flow1);
err_destroy_merge_flow:
	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	kfree(merge_flow);
	return err;
}
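
/* The error labels above unwind in exact reverse order of construction
 * (rhash entry, metadata, links, payload buffers), so a failure at any
 * step leaves no dangling reference to the half-built merge flow.
 */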

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct nfp_port *port = NULL;
	int err;

	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
					      &tun_type, extack);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
					    flow_pay, tun_type, extack);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
	if (err)
		goto err_destroy_flow;

	flow_pay->tc_flower_cookie = flow->cookie;
	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
				     nfp_flower_table_params);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
		goto err_release_metadata;
	}

	err = nfp_flower_xmit_flow(app, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_remove_rhash;

	if (port)
		port->tc_offload_cnt++;

	flow_pay->in_hw = true;

	/* Deallocate flow payload when flower rule has been destroyed. */
	kfree(key_layer);

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

static void
nfp_flower_remove_merge_flow(struct nfp_app *app,
			     struct nfp_fl_payload *del_sub_flow,
			     struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link, *temp;
	struct nfp_fl_payload *origin;
	bool mod = false;
	int err;

	link = list_first_entry(&merge_flow->linked_flows,
				struct nfp_fl_payload_link, merge_flow.list);
	origin = link->sub_flow.flow;

	/* Re-add rule the merge had overwritten if it has not been deleted. */
	if (origin != del_sub_flow)
		mod = true;

	err = nfp_modify_flow_metadata(app, merge_flow);
	if (err) {
		nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
		goto err_free_links;
	}

	if (!mod) {
		err = nfp_flower_xmit_flow(app, merge_flow,
					   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
		if (err) {
			nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
			goto err_free_links;
		}
	} else {
		__nfp_modify_flow_metadata(priv, origin);
		err = nfp_flower_xmit_flow(app, origin,
					   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
		origin->in_hw = true;
	}

err_free_links:
	/* Clean any links connected with the merged flow. */
	list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
				 merge_flow.list)
		nfp_flower_unlink_flow(link);

	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(merge_flow, rcu);
}

static void
nfp_flower_del_linked_merge_flows(struct nfp_app *app,
				  struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link, *temp;

	/* Remove any merge flow formed from the deleted sub_flow. */
	list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
				 sub_flow.list)
		nfp_flower_remove_merge_flow(app, sub_flow,
					     link->merge_flow.flow);
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload. Any flows merged from this are also deleted.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	struct nfp_port *port = NULL;
	int err;

	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
		return -ENOENT;
	}

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_merge_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	if (!nfp_flow->in_hw) {
		err = 0;
		goto err_free_merge_flow;
	}

	err = nfp_flower_xmit_flow(app, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	/* Fall through on error. */

err_free_merge_flow:
	nfp_flower_del_linked_merge_flows(app, nfp_flow);
	if (port)
		port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &nfp_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(nfp_flow, rcu);
	return err;
}

static void
__nfp_flower_update_merge_stats(struct nfp_app *app,
				struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link;
	struct nfp_fl_payload *sub_flow;
	u64 pkts, bytes, used;
	u32 ctx_id;

	ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
	pkts = priv->stats[ctx_id].pkts;
	/* Do not cycle subflows if no stats to distribute. */
	if (!pkts)
		return;
	bytes = priv->stats[ctx_id].bytes;
	used = priv->stats[ctx_id].used;

	/* Reset stats for the merge flow. */
	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;

	/* The merge flow has received stats updates from firmware.
	 * Distribute these stats to all subflows that form the merge.
	 * The stats will then be collected from TC via the subflows.
	 */
	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
		sub_flow = link->sub_flow.flow;
		ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
		priv->stats[ctx_id].pkts += pkts;
		priv->stats[ctx_id].bytes += bytes;
		priv->stats[ctx_id].used = max_t(u64, priv->stats[ctx_id].used,
						 used);
	}
}
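
/* Example (illustrative): if the merge flow's context accumulated 10
 * packets / 1500 bytes from firmware, each subflow context gains +10/+1500
 * and the merge context is zeroed, so a TC stats dump on either subflow
 * reflects the merged traffic exactly once.
 */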

static void
nfp_flower_update_merge_stats(struct nfp_app *app,
			      struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	/* Get merge flows that the subflow forms to distribute their stats. */
	list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
		__nfp_flower_update_merge_stats(app, link->merge_flow.flow);
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	Netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct tc_cls_flower_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	u32 ctx_id;

	extack = flow->common.extack;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
		return -EINVAL;
	}

	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	spin_lock_bh(&priv->stats_lock);
	/* If request is for a sub_flow, update stats from merged flows. */
	if (!list_empty(&nfp_flow->linked_flows))
		nfp_flower_update_merge_stats(app, nfp_flow);

	flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
			  priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);

	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
	spin_unlock_bh(&priv->stats_lock);

	return 0;
}

static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct tc_cls_flower_offload *flower)
{
	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower);
	case TC_CLSFLOWER_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower);
	case TC_CLSFLOWER_STATS:
		return nfp_flower_get_stats(app, netdev, flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data);
	case TC_SETUP_CLSMATCHALL:
		return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
						    type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct tc_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_repr_priv *repr_priv;

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	repr_priv = repr->app_priv;
	repr_priv->block_shared = tcf_block_shared(f->block);

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block,
					     nfp_flower_setup_tc_block_cb,
					     repr, repr, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_tc_block_cb,
					repr);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

struct nfp_flower_indr_block_cb_priv {
	struct net_device *netdev;
	struct nfp_app *app;
	struct list_head list;
};

static struct nfp_flower_indr_block_cb_priv *
nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
				     struct net_device *netdev)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;

	/* All callback list access should be protected by RTNL. */
	ASSERT_RTNL();

	list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}

static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
	struct tc_cls_flower_offload *flower = type_data;

	if (flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(priv->app, priv->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
			       struct tc_block_offload *f)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;
	int err;

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    !(f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
	      nfp_flower_internal_port_can_offload(app, netdev)))
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
		if (!cb_priv)
			return -ENOMEM;

		cb_priv->netdev = netdev;
		cb_priv->app = app;
		list_add(&cb_priv->list, &priv->indr_block_cb_priv);

		err = tcf_block_cb_register(f->block,
					    nfp_flower_setup_indr_block_cb,
					    cb_priv, cb_priv, f->extack);
		if (err) {
			list_del(&cb_priv->list);
			kfree(cb_priv);
		}

		return err;
	case TC_BLOCK_UNBIND:
		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
		if (!cb_priv)
			return -ENOENT;

		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_indr_block_cb,
					cb_priv);
		list_del(&cb_priv->list);
		kfree(cb_priv);

		return 0;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
			    enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
						      type_data);
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
				       struct net_device *netdev,
				       unsigned long event)
{
	int err;

	if (!nfp_fl_is_netdev_to_offload(netdev))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
		err = __tc_indr_block_cb_register(netdev, app,
						  nfp_flower_indr_setup_tc_cb,
						  app);
		if (err)
			nfp_flower_cmsg_warn(app,
					     "Indirect block reg failed - %s\n",
					     netdev->name);
	} else if (event == NETDEV_UNREGISTER) {
		__tc_indr_block_cb_unregister(netdev,
					      nfp_flower_indr_setup_tc_cb, app);
	}

	return NOTIFY_OK;
}
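
/* For reference, a classifier such as the following on an offloadable
 * netdev would arrive here as a TC_SETUP_BLOCK bind and subsequently as
 * TC_CLSFLOWER_REPLACE commands (the interface name is only an example):
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 */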