// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

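/* Match layers a sub_flow may use and still be considered for merging;
 * see nfp_flower_populate_merge_match().
 */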
#define NFP_FLOWER_MERGE_FIELDS \
	(NFP_FLOWER_LAYER_PORT | \
	 NFP_FLOWER_LAYER_MAC | \
	 NFP_FLOWER_LAYER_TP | \
	 NFP_FLOWER_LAYER_IPV4 | \
	 NFP_FLOWER_LAYER_IPV6)

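/* Accumulates the match fields relevant to a merge check. The anonymous
 * union lets the same bytes be viewed either as the individual match
 * structures or as the vals[] array, so two checks can be compared with
 * the bitmap API (see nfp_flower_can_merge()).
 */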
struct nfp_flower_merge_check {
	union {
		struct {
			__be16 tci;
			struct nfp_flower_mac_mpls l2;
			struct nfp_flower_tp_ports l4;
			union {
				struct nfp_flower_ipv4 ipv4;
				struct nfp_flower_ipv6 ipv6;
			};
		};
		unsigned long vals[8];
	};
};

static int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
		     u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(app->ctrl, skb);

	return 0;
}

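/* True if the flow also matches on fields above L2; used to reject offload
 * of unknown ethertypes combined with L3/L4 matches.
 */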
static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);

	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static bool nfp_flower_check_higher_than_l3(struct tc_cls_flower_offload *f)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);

	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static int
nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts,
			  u32 *key_layer_two, int *key_size,
			  struct netlink_ext_ack *extack)
{
	if (enc_opts->key->len > NFP_FL_MAX_GENEVE_OPT_KEY) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
		return -EOPNOTSUPP;
	}

	if (enc_opts->key->len > 0) {
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
		*key_size += sizeof(struct nfp_flower_geneve_options);
	}

	return 0;
}

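/* Inspect the flower match and work out which NFP key layers, and how much
 * key space, are needed to represent it. Matches the firmware cannot
 * support are rejected with the reason reported via extack.
 */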
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct net_device *netdev,
				struct nfp_fl_key_ls *ret_key_ls,
				struct tc_cls_flower_offload *flow,
				enum nfp_flower_tun_type *tun_type,
				struct netlink_ext_ack *extack)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic basic = { NULL, NULL };
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
	int err;

	if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
		return -EOPNOTSUPP;
	}

	/* If any tun dissector is used then the required set must be used. */
	if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
		return -EOPNOTSUPP;
	}

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan vlan;

		flow_rule_match_vlan(rule, &vlan);
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
		    vlan.key->vlan_priority) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
			return -EOPNOTSUPP;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_match_enc_opts enc_op = { NULL, NULL };
		struct flow_match_ipv4_addrs ipv4_addrs;
		struct flow_match_control enc_ctl;
		struct flow_match_ports enc_ports;

		flow_rule_match_enc_control(rule, &enc_ctl);

		if (enc_ctl.mask->addr_type != 0xffff) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
			return -EOPNOTSUPP;
		}
		if (enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only IPv4 tunnels are supported");
			return -EOPNOTSUPP;
		}

		/* These fields are already verified as used. */
		flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
		if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
			return -EOPNOTSUPP;
		}

		flow_rule_match_enc_ports(rule, &enc_ports);
		if (enc_ports.mask->dst != cpu_to_be16(~0)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
			return -EOPNOTSUPP;
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
			flow_rule_match_enc_opts(rule, &enc_op);

		switch (enc_ports.key->dst) {
		case htons(IANA_VXLAN_UDP_PORT):
			*tun_type = NFP_FL_TUNNEL_VXLAN;
			key_layer |= NFP_FLOWER_LAYER_VXLAN;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (enc_op.key) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
				return -EOPNOTSUPP;
			}
			break;
		case htons(GENEVE_UDP_PORT):
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
				return -EOPNOTSUPP;
			}
			*tun_type = NFP_FL_TUNNEL_GENEVE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (!enc_op.key)
				break;
			if (!(priv->flower_ext_feats &
			      NFP_FL_FEATS_GENEVE_OPT)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
				return -EOPNOTSUPP;
			}
			err = nfp_flower_calc_opt_layer(&enc_op, &key_layer_two,
							&key_size, extack);
			if (err)
				return err;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
			return -EOPNOTSUPP;
		}

		/* Ensure the ingress netdev matches the expected tun type. */
		if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
			return -EOPNOTSUPP;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
		flow_rule_match_basic(rule, &basic);

	if (basic.mask && basic.mask->n_proto) {
		/* Ethernet type is present in the key. */
		switch (basic.key->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			/* Other ethtype - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_mac(flow)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: non IPv4/IPv6 offload with L3/L4 matches not supported");
				return -EOPNOTSUPP;
			}
			break;
		}
	}

	if (basic.mask && basic.mask->ip_proto) {
		switch (basic.key->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		default:
			/* Other ip proto - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_l3(flow)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unknown IP protocol with L4 matches not supported");
				return -EOPNOTSUPP;
			}
			break;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp tcp;
		u32 tcp_flags;

		flow_rule_match_tcp(rule, &tcp);
		tcp_flags = be16_to_cpu(tcp.key->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
			return -EOPNOTSUPP;
		}

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
			return -EOPNOTSUPP;
		}

		/* We need to store TCP flags in either the IPv4 or IPv6 key
		 * space, thus we need to ensure we include an IPv4/IPv6 key
		 * layer if we have not done so already.
		 */
		if (!basic.key) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
			return -EOPNOTSUPP;
		}

		if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
		    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
			switch (basic.key->n_proto) {
			case cpu_to_be16(ETH_P_IP):
				key_layer |= NFP_FLOWER_LAYER_IPV4;
				key_size += sizeof(struct nfp_flower_ipv4);
				break;

			case cpu_to_be16(ETH_P_IPV6):
				key_layer |= NFP_FLOWER_LAYER_IPV6;
				key_size += sizeof(struct nfp_flower_ipv6);
				break;

			default:
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control ctl;

		flow_rule_match_control(rule, &ctl);
		if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
			return -EOPNOTSUPP;
		}
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}

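/* Allocate a flow payload with key/mask buffers sized from the calculated
 * key layers and a maximum-size action buffer.
 */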
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->meta.flags = 0;
	INIT_LIST_HEAD(&flow_pay->linked_flows);
	flow_pay->in_hw = false;

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

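/* OR the write masks of the flow's actions into the merge check so that
 * fields rewritten before recirculation count as "matched" when validating
 * a merge candidate.
 */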
static int
nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
				     struct nfp_flower_merge_check *merge,
				     u8 *last_act_id, int *act_out)
{
	struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
	struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
	struct nfp_fl_set_ip4_addrs *ipv4_add;
	struct nfp_fl_set_ipv6_addr *ipv6_add;
	struct nfp_fl_push_vlan *push_vlan;
	struct nfp_fl_set_tport *tport;
	struct nfp_fl_set_eth *eth;
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;
	u8 act_id = 0;
	u8 *ports;
	int i;

	while (act_off < flow->meta.act_len) {
		a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_OUTPUT:
			if (act_out)
				(*act_out)++;
			break;
		case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
			push_vlan = (struct nfp_fl_push_vlan *)a;
			if (push_vlan->vlan_tci)
				merge->tci = cpu_to_be16(0xffff);
			break;
		case NFP_FL_ACTION_OPCODE_POP_VLAN:
			merge->tci = cpu_to_be16(0);
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL:
			/* New tunnel header means l2 to l4 can be matched. */
			eth_broadcast_addr(&merge->l2.mac_dst[0]);
			eth_broadcast_addr(&merge->l2.mac_src[0]);
			memset(&merge->l4, 0xff,
			       sizeof(struct nfp_flower_tp_ports));
			memset(&merge->ipv4, 0xff,
			       sizeof(struct nfp_flower_ipv4));
			break;
		case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
			eth = (struct nfp_fl_set_eth *)a;
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_src[i] |=
					eth->eth_addr_mask[ETH_ALEN + i];
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
			ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
			merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
			merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
			ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
			merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
			merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
			ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
			merge->ipv6.ip_ext.ttl |=
				ipv6_tc_hl_fl->ipv6_hop_limit_mask;
			merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
			merge->ipv6.ipv6_flow_label_exthdr |=
				ipv6_tc_hl_fl->ipv6_label_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_UDP:
		case NFP_FL_ACTION_OPCODE_SET_TCP:
			tport = (struct nfp_fl_set_tport *)a;
			ports = (u8 *)&merge->l4.port_src;
			for (i = 0; i < 4; i++)
				ports[i] |= tport->tp_port_mask[i];
			break;
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
		case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
			break;
		default:
			return -EOPNOTSUPP;
		}

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	if (last_act_id)
		*last_act_id = act_id;

	return 0;
}

static int
nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
				struct nfp_flower_merge_check *merge,
				bool extra_fields)
{
	struct nfp_flower_meta_tci *meta_tci;
	u8 *mask = flow->mask_data;
	u8 key_layer, match_size;

	memset(merge, 0, sizeof(struct nfp_flower_merge_check));

	meta_tci = (struct nfp_flower_meta_tci *)mask;
	key_layer = meta_tci->nfp_flow_key_layer;

	if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
		return -EOPNOTSUPP;

	merge->tci = meta_tci->tci;
	mask += sizeof(struct nfp_flower_meta_tci);

	if (key_layer & NFP_FLOWER_LAYER_EXT_META)
		mask += sizeof(struct nfp_flower_ext_meta);

	mask += sizeof(struct nfp_flower_in_port);

	if (key_layer & NFP_FLOWER_LAYER_MAC) {
		match_size = sizeof(struct nfp_flower_mac_mpls);
		memcpy(&merge->l2, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_TP) {
		match_size = sizeof(struct nfp_flower_tp_ports);
		memcpy(&merge->l4, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV4) {
		match_size = sizeof(struct nfp_flower_ipv4);
		memcpy(&merge->ipv4, mask, match_size);
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV6) {
		match_size = sizeof(struct nfp_flower_ipv6);
		memcpy(&merge->ipv6, mask, match_size);
	}

	return 0;
}

static int
nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
		     struct nfp_fl_payload *sub_flow2)
{
	/* Two flows can be merged if sub_flow2 only matches on bits that are
	 * either matched by sub_flow1 or set by a sub_flow1 action. This
	 * ensures that every packet that hits sub_flow1 and recirculates is
	 * guaranteed to hit sub_flow2.
	 */
	struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
	int err, act_out = 0;
	u8 last_act_id = 0;

	err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
					      true);
	if (err)
		return err;

	err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
					      false);
	if (err)
		return err;

	err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
						   &last_act_id, &act_out);
	if (err)
		return err;

	/* Must only be 1 output action and it must be the last in sequence. */
	if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
		return -EOPNOTSUPP;

	/* Reject merge if sub_flow2 matches on something that is not matched
	 * on or set in an action by sub_flow1.
	 */
	err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
			    sub_flow1_merge.vals,
			    sizeof(struct nfp_flower_merge_check) * 8);
	if (err)
		return -EINVAL;

	return 0;
}

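/* Pre-actions (e.g. PRE_TUNNEL, PRE_LAG) must sit at the head of an action
 * list. Copy any leading pre-actions to act_dst and return their total
 * length so callers can skip past them.
 */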
static unsigned int
nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
			    bool *tunnel_act)
{
	unsigned int act_off = 0, act_len;
	struct nfp_fl_act_head *a;
	u8 act_id = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&act_src[act_off];
		act_len = a->len_lw << NFP_FL_LW_SIZ;
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
			if (tunnel_act)
				*tunnel_act = true;
			/* fall through */
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
			memcpy(act_dst + act_off, act_src + act_off, act_len);
			break;
		default:
			return act_off;
		}

		act_off += act_len;
	}

	return act_off;
}

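/* After a tunnel push only output actions are valid; verify nothing else
 * follows in the action list.
 */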
static int nfp_fl_verify_post_tun_acts(char *acts, int len)
{
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&acts[act_off];
		if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
			return -EOPNOTSUPP;

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	return 0;
}

static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
			struct nfp_fl_payload *sub_flow2,
			struct nfp_fl_payload *merge_flow)
{
	unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
	bool tunnel_act = false;
	char *merge_act;
	int err;

	/* The last action of sub_flow1 must be output - do not merge this. */
	sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
	sub2_act_len = sub_flow2->meta.act_len;

	if (!sub2_act_len)
		return -EINVAL;

	if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
		return -EINVAL;

	/* A shortcut can only be applied if there is a single action. */
	if (sub1_act_len)
		merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
	else
		merge_flow->meta.shortcut = sub_flow2->meta.shortcut;

	merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
	merge_act = merge_flow->action_data;

	/* Copy any pre-actions to the start of merge flow action list. */
	pre_off1 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow1->action_data,
					       sub1_act_len, &tunnel_act);
	merge_act += pre_off1;
	sub1_act_len -= pre_off1;
	pre_off2 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow2->action_data,
					       sub2_act_len, NULL);
	merge_act += pre_off2;
	sub2_act_len -= pre_off2;

	/* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
	 * a tunnel, sub_flow 2 can only have output actions for a valid merge.
	 */
	if (tunnel_act) {
		char *post_tun_acts = &sub_flow2->action_data[pre_off2];

		err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len);
		if (err)
			return err;
	}

	/* Copy remaining actions from sub_flows 1 and 2. */
	memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
	merge_act += sub1_act_len;
	memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);

	return 0;
}

/* Flow link code should only be accessed under RTNL. */
static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
{
	list_del(&link->merge_flow.list);
	list_del(&link->sub_flow.list);
	kfree(link);
}

static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
				    struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
		if (link->sub_flow.flow == sub_flow) {
			nfp_flower_unlink_flow(link);
			return;
		}
}

static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
				 struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	link->merge_flow.flow = merge_flow;
	list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
	link->sub_flow.flow = sub_flow;
	list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);

	return 0;
}

/**
 * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows into one.
 * @app:	Pointer to the APP handle
 * @sub_flow1:	Initial flow matched to produce merge hint
 * @sub_flow2:	Post recirculation flow matched in merge hint
 *
 * Combines 2 flows (if valid) into a single flow, removing the initial flow
 * from hw and offloading the new, merged flow.
 *
 * Return: negative value on error, 0 on success.
 */
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
				     struct nfp_fl_payload *sub_flow1,
				     struct nfp_fl_payload *sub_flow2)
{
	struct tc_cls_flower_offload merge_tc_off;
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *merge_flow;
	struct nfp_fl_key_ls merge_key_ls;
	int err;

	ASSERT_RTNL();

	extack = merge_tc_off.common.extack;
	if (sub_flow1 == sub_flow2 ||
	    nfp_flower_is_merge_flow(sub_flow1) ||
	    nfp_flower_is_merge_flow(sub_flow2))
		return -EINVAL;

	err = nfp_flower_can_merge(sub_flow1, sub_flow2);
	if (err)
		return err;

	merge_key_ls.key_size = sub_flow1->meta.key_len;

	merge_flow = nfp_flower_allocate_new(&merge_key_ls);
	if (!merge_flow)
		return -ENOMEM;

	merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
	merge_flow->ingress_dev = sub_flow1->ingress_dev;

	memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
	       sub_flow1->meta.key_len);
	memcpy(merge_flow->mask_data, sub_flow1->mask_data,
	       sub_flow1->meta.mask_len);

	err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow1);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow2);
	if (err)
		goto err_unlink_sub_flow1;

	merge_tc_off.cookie = merge_flow->tc_flower_cookie;
	err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
					merge_flow->ingress_dev, extack);
	if (err)
		goto err_unlink_sub_flow2;

	err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto err_release_metadata;

	err = nfp_flower_xmit_flow(app, merge_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
	if (err)
		goto err_remove_rhash;

	merge_flow->in_hw = true;
	sub_flow1->in_hw = false;

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, merge_flow);
err_unlink_sub_flow2:
	nfp_flower_unlink_flows(merge_flow, sub_flow2);
err_unlink_sub_flow1:
	nfp_flower_unlink_flows(merge_flow, sub_flow1);
err_destroy_merge_flow:
	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	kfree(merge_flow);
	return err;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct nfp_port *port = NULL;
	int err;

	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
					      &tun_type, extack);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
					    flow_pay, tun_type, extack);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
	if (err)
		goto err_destroy_flow;

	flow_pay->tc_flower_cookie = flow->cookie;
	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
				     nfp_flower_table_params);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
		goto err_release_metadata;
	}

	err = nfp_flower_xmit_flow(app, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_remove_rhash;

	if (port)
		port->tc_offload_cnt++;

	flow_pay->in_hw = true;

	/* Deallocate flow payload when flower rule has been destroyed. */
	kfree(key_layer);

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

static void
nfp_flower_remove_merge_flow(struct nfp_app *app,
			     struct nfp_fl_payload *del_sub_flow,
			     struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link, *temp;
	struct nfp_fl_payload *origin;
	bool mod = false;
	int err;

	link = list_first_entry(&merge_flow->linked_flows,
				struct nfp_fl_payload_link, merge_flow.list);
	origin = link->sub_flow.flow;

	/* Re-add the rule the merge had overwritten if it has not been deleted. */
	if (origin != del_sub_flow)
		mod = true;

	err = nfp_modify_flow_metadata(app, merge_flow);
	if (err) {
		nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
		goto err_free_links;
	}

	if (!mod) {
		err = nfp_flower_xmit_flow(app, merge_flow,
					   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
		if (err) {
			nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
			goto err_free_links;
		}
	} else {
		__nfp_modify_flow_metadata(priv, origin);
		err = nfp_flower_xmit_flow(app, origin,
					   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
		origin->in_hw = true;
	}

err_free_links:
	/* Clean any links connected with the merged flow. */
	list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
				 merge_flow.list)
		nfp_flower_unlink_flow(link);

	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(merge_flow, rcu);
}

static void
nfp_flower_del_linked_merge_flows(struct nfp_app *app,
				  struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link, *temp;

	/* Remove any merge flow formed from the deleted sub_flow. */
	list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
				 sub_flow.list)
		nfp_flower_remove_merge_flow(app, sub_flow,
					     link->merge_flow.flow);
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload. Any flows merged from this are also deleted.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	struct nfp_port *port = NULL;
	int err;

	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
		return -ENOENT;
	}

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_merge_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	if (!nfp_flow->in_hw) {
		err = 0;
		goto err_free_merge_flow;
	}

	err = nfp_flower_xmit_flow(app, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	/* Fall through on error. */

err_free_merge_flow:
	nfp_flower_del_linked_merge_flows(app, nfp_flow);
	if (port)
		port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &nfp_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(nfp_flow, rcu);
	return err;
}

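/* Stats accesses assume &priv->stats_lock is held by the caller
 * (see nfp_flower_get_stats()).
 */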
static void
__nfp_flower_update_merge_stats(struct nfp_app *app,
				struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link;
	struct nfp_fl_payload *sub_flow;
	u64 pkts, bytes, used;
	u32 ctx_id;

	ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
	pkts = priv->stats[ctx_id].pkts;
	/* Do not cycle subflows if no stats to distribute. */
	if (!pkts)
		return;
	bytes = priv->stats[ctx_id].bytes;
	used = priv->stats[ctx_id].used;

	/* Reset stats for the merge flow. */
	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;

	/* The merge flow has received stats updates from firmware.
	 * Distribute these stats to all subflows that form the merge.
	 * The stats will be collected from TC via the subflows.
	 */
	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
		sub_flow = link->sub_flow.flow;
		ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
		priv->stats[ctx_id].pkts += pkts;
		priv->stats[ctx_id].bytes += bytes;
		priv->stats[ctx_id].used = max_t(u64, used,
						 priv->stats[ctx_id].used);
	}
}

static void
nfp_flower_update_merge_stats(struct nfp_app *app,
			      struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	/* Get merge flows that the subflow forms to distribute their stats. */
	list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
		__nfp_flower_update_merge_stats(app, link->merge_flow.flow);
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	Netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct tc_cls_flower_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	u32 ctx_id;

	extack = flow->common.extack;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
		return -EINVAL;
	}

	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	spin_lock_bh(&priv->stats_lock);
	/* If request is for a sub_flow, update stats from merged flows. */
	if (!list_empty(&nfp_flow->linked_flows))
		nfp_flower_update_merge_stats(app, nfp_flow);

	flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
			  priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);

	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
	spin_unlock_bh(&priv->stats_lock);

	return 0;
}

static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct tc_cls_flower_offload *flower)
{
	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower);
	case TC_CLSFLOWER_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower);
	case TC_CLSFLOWER_STATS:
		return nfp_flower_get_stats(app, netdev, flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data);
	case TC_SETUP_CLSMATCHALL:
		return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
						    type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct tc_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_repr_priv *repr_priv;

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	repr_priv = repr->app_priv;
	repr_priv->block_shared = tcf_block_shared(f->block);

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block,
					     nfp_flower_setup_tc_block_cb,
					     repr, repr, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_tc_block_cb,
					repr);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

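/* Per-netdev context for indirect block callbacks, kept on
 * priv->indr_block_cb_priv and looked up under RTNL.
 */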
struct nfp_flower_indr_block_cb_priv {
	struct net_device *netdev;
	struct nfp_app *app;
	struct list_head list;
};

static struct nfp_flower_indr_block_cb_priv *
nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
				     struct net_device *netdev)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;

	/* All callback list access should be protected by RTNL. */
	ASSERT_RTNL();

	list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}

static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
	struct tc_cls_flower_offload *flower = type_data;

	if (flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(priv->app, priv->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
			       struct tc_block_offload *f)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;
	int err;

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    !(f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
	      nfp_flower_internal_port_can_offload(app, netdev)))
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
		if (!cb_priv)
			return -ENOMEM;

		cb_priv->netdev = netdev;
		cb_priv->app = app;
		list_add(&cb_priv->list, &priv->indr_block_cb_priv);

		err = tcf_block_cb_register(f->block,
					    nfp_flower_setup_indr_block_cb,
					    cb_priv, cb_priv, f->extack);
		if (err) {
			list_del(&cb_priv->list);
			kfree(cb_priv);
		}

		return err;
	case TC_BLOCK_UNBIND:
		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
		if (!cb_priv)
			return -ENOENT;

		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_indr_block_cb,
					cb_priv);
		list_del(&cb_priv->list);
		kfree(cb_priv);

		return 0;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
			    enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
						      type_data);
	default:
		return -EOPNOTSUPP;
	}
}

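/* Netdev notifier hook: (un)registers the indirect block callback for
 * netdevs that can be offloaded through (e.g. tunnel devices), as decided
 * by nfp_fl_is_netdev_to_offload().
 */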
int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
				       struct net_device *netdev,
				       unsigned long event)
{
	int err;

	if (!nfp_fl_is_netdev_to_offload(netdev))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
		err = __tc_indr_block_cb_register(netdev, app,
						  nfp_flower_indr_setup_tc_cb,
						  app);
		if (err)
			nfp_flower_cmsg_warn(app,
					     "Indirect block reg failed - %s\n",
					     netdev->name);
	} else if (event == NETDEV_UNREGISTER) {
		__tc_indr_block_cb_unregister(netdev,
					      nfp_flower_indr_setup_tc_cb, app);
	}

	return NOTIFY_OK;
}