drivers/net/ethernet/netronome/nfp/flower/offload.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
        (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
         TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
        (FLOW_DIS_IS_FRAGMENT | \
         FLOW_DIS_FIRST_FRAG)

#define NFP_FLOWER_WHITELIST_DISSECTOR \
        (BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_BASIC) | \
         BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_TCP) | \
         BIT(FLOW_DISSECTOR_KEY_PORTS) | \
         BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_VLAN) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
         BIT(FLOW_DISSECTOR_KEY_MPLS) | \
         BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
        (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
        (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R \
        (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS))

#define NFP_FLOWER_MERGE_FIELDS \
        (NFP_FLOWER_LAYER_PORT | \
         NFP_FLOWER_LAYER_MAC | \
         NFP_FLOWER_LAYER_TP | \
         NFP_FLOWER_LAYER_IPV4 | \
         NFP_FLOWER_LAYER_IPV6)

#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
        (NFP_FLOWER_LAYER_PORT | \
         NFP_FLOWER_LAYER_MAC | \
         NFP_FLOWER_LAYER_IPV4 | \
         NFP_FLOWER_LAYER_IPV6)

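/* Superset of all fields that flow merging must account for.  The
 * anonymous struct view gives per-field access while the vals[] view
 * lets nfp_flower_can_merge() apply bitmap operations across the
 * entire set at once.
 */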
struct nfp_flower_merge_check {
        union {
                struct {
                        __be16 tci;
                        struct nfp_flower_mac_mpls l2;
                        struct nfp_flower_tp_ports l4;
                        union {
                                struct nfp_flower_ipv4 ipv4;
                                struct nfp_flower_ipv6 ipv6;
                        };
                };
                unsigned long vals[8];
        };
};

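/* Send a flow add/mod/del control message for @nfp_flow to the firmware.
 * The length fields inside the rule metadata travel in units of long
 * words (NFP_FL_LW_SIZ), so they are converted before the metadata is
 * copied into the message and restored to byte units afterwards.
 */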
static int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
                     u8 mtype)
{
        u32 meta_len, key_len, mask_len, act_len, tot_len;
        struct sk_buff *skb;
        unsigned char *msg;

        meta_len = sizeof(struct nfp_fl_rule_metadata);
        key_len = nfp_flow->meta.key_len;
        mask_len = nfp_flow->meta.mask_len;
        act_len = nfp_flow->meta.act_len;

        tot_len = meta_len + key_len + mask_len + act_len;

        /* Convert to long words as firmware expects
         * lengths in units of NFP_FL_LW_SIZ.
         */
        nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
        nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
        nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

        skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        msg = nfp_flower_cmsg_get_data(skb);
        memcpy(msg, &nfp_flow->meta, meta_len);
        memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
        memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
        memcpy(&msg[meta_len + key_len + mask_len],
               nfp_flow->action_data, act_len);

        /* Convert back to bytes as software expects
         * lengths in units of bytes.
         */
        nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
        nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
        nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

        nfp_ctrl_tx(app->ctrl, skb);

        return 0;
}

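/* The two helpers below report whether a rule matches on fields above
 * L2 or L3 respectively.  Such matches can only be offloaded when the
 * corresponding EtherType or IP protocol is also given; the caller
 * enforces this.
 */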
static bool nfp_flower_check_higher_than_mac(struct flow_cls_offload *f)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);

        return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
               flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
               flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
               flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);

        return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
               flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

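/* Account for GENEVE option matches in the key layout: reject option
 * data longer than the supported maximum, otherwise enable the
 * GENEVE_OP layer and grow the key by the option match struct.
 */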
static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
                          u32 *key_layer_two, int *key_size, bool ipv6,
                          struct netlink_ext_ack *extack)
{
        if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY ||
            (ipv6 && enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY_V6)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
                return -EOPNOTSUPP;
        }

        if (enc_opts->len > 0) {
                *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
                *key_size += sizeof(struct nfp_flower_geneve_options);
        }

        return 0;
}

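/* Classify a UDP tunnel match by its destination port (the well-known
 * VXLAN or GENEVE ports) and extend the key layers and key size to
 * carry the tunnel header, checking firmware feature support on the way.
 */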
static int
nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
                              struct flow_dissector_key_enc_opts *enc_op,
                              u32 *key_layer_two, u8 *key_layer, int *key_size,
                              struct nfp_flower_priv *priv,
                              enum nfp_flower_tun_type *tun_type, bool ipv6,
                              struct netlink_ext_ack *extack)
{
        int err;

        switch (enc_ports->dst) {
        case htons(IANA_VXLAN_UDP_PORT):
                *tun_type = NFP_FL_TUNNEL_VXLAN;
                *key_layer |= NFP_FLOWER_LAYER_VXLAN;

                if (ipv6) {
                        *key_layer |= NFP_FLOWER_LAYER_EXT_META;
                        *key_size += sizeof(struct nfp_flower_ext_meta);
                        *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
                        *key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
                } else {
                        *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
                }

                if (enc_op) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
                        return -EOPNOTSUPP;
                }
                break;
        case htons(GENEVE_UDP_PORT):
                if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
                        return -EOPNOTSUPP;
                }
                *tun_type = NFP_FL_TUNNEL_GENEVE;
                *key_layer |= NFP_FLOWER_LAYER_EXT_META;
                *key_size += sizeof(struct nfp_flower_ext_meta);
                *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;

                if (ipv6) {
                        *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
                        *key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
                } else {
                        *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
                }

                if (!enc_op)
                        break;
                if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
                        return -EOPNOTSUPP;
                }
                err = nfp_flower_calc_opt_layer(enc_op, key_layer_two, key_size,
                                                ipv6, extack);
                if (err)
                        return err;
                break;
        default:
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
                return -EOPNOTSUPP;
        }

        return 0;
}

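/* Walk the flow dissector keys used by a rule and work out which NFP
 * key layers, extended layers and total key size are needed to express
 * it, failing with an extack message for anything the firmware cannot
 * offload.
 */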
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
                                struct net_device *netdev,
                                struct nfp_fl_key_ls *ret_key_ls,
                                struct flow_cls_offload *flow,
                                enum nfp_flower_tun_type *tun_type,
                                struct netlink_ext_ack *extack)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
        struct flow_dissector *dissector = rule->match.dissector;
        struct flow_match_basic basic = { NULL, NULL };
        struct nfp_flower_priv *priv = app->priv;
        u32 key_layer_two;
        u8 key_layer;
        int key_size;
        int err;

        if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
                return -EOPNOTSUPP;
        }

        /* If any tun dissector is used then the required set must be used. */
        if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
            (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R)
            != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R &&
            (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
            != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
                return -EOPNOTSUPP;
        }

        key_layer_two = 0;
        key_layer = NFP_FLOWER_LAYER_PORT;
        key_size = sizeof(struct nfp_flower_meta_tci) +
                   sizeof(struct nfp_flower_in_port);

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
            flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
                key_layer |= NFP_FLOWER_LAYER_MAC;
                key_size += sizeof(struct nfp_flower_mac_mpls);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_match_vlan vlan;

                flow_rule_match_vlan(rule, &vlan);
                if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
                    vlan.key->vlan_priority) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
                        return -EOPNOTSUPP;
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
                struct flow_match_enc_opts enc_op = { NULL, NULL };
                struct flow_match_ipv4_addrs ipv4_addrs;
                struct flow_match_ipv6_addrs ipv6_addrs;
                struct flow_match_control enc_ctl;
                struct flow_match_ports enc_ports;
                bool ipv6_tun = false;

                flow_rule_match_enc_control(rule, &enc_ctl);

                if (enc_ctl.mask->addr_type != 0xffff) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
                        return -EOPNOTSUPP;
                }

                ipv6_tun = enc_ctl.key->addr_type ==
                                FLOW_DISSECTOR_KEY_IPV6_ADDRS;
                if (ipv6_tun &&
                    !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: firmware does not support IPv6 tunnels");
                        return -EOPNOTSUPP;
                }

                if (!ipv6_tun &&
                    enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel address type not IPv4 or IPv6");
                        return -EOPNOTSUPP;
                }

                if (ipv6_tun) {
                        flow_rule_match_enc_ipv6_addrs(rule, &ipv6_addrs);
                        if (memchr_inv(&ipv6_addrs.mask->dst, 0xff,
                                       sizeof(ipv6_addrs.mask->dst))) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv6 destination address is supported");
                                return -EOPNOTSUPP;
                        }
                } else {
                        flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
                        if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
                                return -EOPNOTSUPP;
                        }
                }

                if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
                        flow_rule_match_enc_opts(rule, &enc_op);

                if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
                        /* check if GRE, which has no enc_ports */
                        if (!netif_is_gretap(netdev)) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
                                return -EOPNOTSUPP;
                        }

                        *tun_type = NFP_FL_TUNNEL_GRE;
                        key_layer |= NFP_FLOWER_LAYER_EXT_META;
                        key_size += sizeof(struct nfp_flower_ext_meta);
                        key_layer_two |= NFP_FLOWER_LAYER2_GRE;

                        if (ipv6_tun) {
                                key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
                                key_size +=
                                        sizeof(struct nfp_flower_ipv6_udp_tun);
                        } else {
                                key_size +=
                                        sizeof(struct nfp_flower_ipv4_udp_tun);
                        }

                        if (enc_op.key) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
                                return -EOPNOTSUPP;
                        }
                } else {
                        flow_rule_match_enc_ports(rule, &enc_ports);
                        if (enc_ports.mask->dst != cpu_to_be16(~0)) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
                                return -EOPNOTSUPP;
                        }

                        err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
                                                            enc_op.key,
                                                            &key_layer_two,
                                                            &key_layer,
                                                            &key_size, priv,
                                                            tun_type, ipv6_tun,
                                                            extack);
                        if (err)
                                return err;

                        /* Ensure the ingress netdev matches the expected
                         * tun type.
                         */
                        if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
                                return -EOPNOTSUPP;
                        }
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
                flow_rule_match_basic(rule, &basic);

        if (basic.mask && basic.mask->n_proto) {
                /* Ethernet type is present in the key. */
                switch (basic.key->n_proto) {
                case cpu_to_be16(ETH_P_IP):
                        key_layer |= NFP_FLOWER_LAYER_IPV4;
                        key_size += sizeof(struct nfp_flower_ipv4);
                        break;

                case cpu_to_be16(ETH_P_IPV6):
                        key_layer |= NFP_FLOWER_LAYER_IPV6;
                        key_size += sizeof(struct nfp_flower_ipv6);
                        break;

                /* Currently we do not offload ARP
                 * because we rely on it to get to the host.
                 */
                case cpu_to_be16(ETH_P_ARP):
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
                        return -EOPNOTSUPP;

                case cpu_to_be16(ETH_P_MPLS_UC):
                case cpu_to_be16(ETH_P_MPLS_MC):
                        if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
                                key_layer |= NFP_FLOWER_LAYER_MAC;
                                key_size += sizeof(struct nfp_flower_mac_mpls);
                        }
                        break;

                /* Will be included in layer 2. */
                case cpu_to_be16(ETH_P_8021Q):
                        break;

                default:
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
                        return -EOPNOTSUPP;
                }
        } else if (nfp_flower_check_higher_than_mac(flow)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
                return -EOPNOTSUPP;
        }

        if (basic.mask && basic.mask->ip_proto) {
                switch (basic.key->ip_proto) {
                case IPPROTO_TCP:
                case IPPROTO_UDP:
                case IPPROTO_SCTP:
                case IPPROTO_ICMP:
                case IPPROTO_ICMPV6:
                        key_layer |= NFP_FLOWER_LAYER_TP;
                        key_size += sizeof(struct nfp_flower_tp_ports);
                        break;
                }
        }

        if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
            nfp_flower_check_higher_than_l3(flow)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
                return -EOPNOTSUPP;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
                struct flow_match_tcp tcp;
                u32 tcp_flags;

                flow_rule_match_tcp(rule, &tcp);
                tcp_flags = be16_to_cpu(tcp.key->flags);

                if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
                        return -EOPNOTSUPP;
                }

                /* We only support PSH and URG flags when either
                 * FIN, SYN or RST is present as well.
                 */
                if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
                    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
                        return -EOPNOTSUPP;
                }

                /* We need to store TCP flags in either the IPv4 or IPv6 key
                 * space, thus we need to ensure we include an IPv4/IPv6 key
                 * layer if we have not done so already.
                 */
                if (!basic.key) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
                        return -EOPNOTSUPP;
                }

                if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
                    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
                        switch (basic.key->n_proto) {
                        case cpu_to_be16(ETH_P_IP):
                                key_layer |= NFP_FLOWER_LAYER_IPV4;
                                key_size += sizeof(struct nfp_flower_ipv4);
                                break;

                        case cpu_to_be16(ETH_P_IPV6):
                                key_layer |= NFP_FLOWER_LAYER_IPV6;
                                key_size += sizeof(struct nfp_flower_ipv6);
                                break;

                        default:
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
                                return -EOPNOTSUPP;
                        }
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_match_control ctl;

                flow_rule_match_control(rule, &ctl);
                if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
                        return -EOPNOTSUPP;
                }
        }

        ret_key_ls->key_layer = key_layer;
        ret_key_ls->key_layer_two = key_layer_two;
        ret_key_ls->key_size = key_size;

        return 0;
}

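/* Allocate a flow payload sized for the calculated key layers: key and
 * mask buffers of key_size bytes plus a maximally sized action buffer.
 */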
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
        struct nfp_fl_payload *flow_pay;

        flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
        if (!flow_pay)
                return NULL;

        flow_pay->meta.key_len = key_layer->key_size;
        flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
        if (!flow_pay->unmasked_data)
                goto err_free_flow;

        flow_pay->meta.mask_len = key_layer->key_size;
        flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
        if (!flow_pay->mask_data)
                goto err_free_unmasked;

        flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
        if (!flow_pay->action_data)
                goto err_free_mask;

        flow_pay->nfp_tun_ipv4_addr = 0;
        flow_pay->nfp_tun_ipv6 = NULL;
        flow_pay->meta.flags = 0;
        INIT_LIST_HEAD(&flow_pay->linked_flows);
        flow_pay->in_hw = false;
        flow_pay->pre_tun_rule.dev = NULL;

        return flow_pay;

err_free_mask:
        kfree(flow_pay->mask_data);
err_free_unmasked:
        kfree(flow_pay->unmasked_data);
err_free_flow:
        kfree(flow_pay);
        return NULL;
}

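/* Fold the effect of @flow's actions into the @merge check: any field
 * an action rewrites (or a fresh tunnel header fully defines) is marked
 * as matched by OR-ing its mask in.  The last action id is reported and
 * output actions are counted for the caller to validate.
 */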
static int
nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
                                     struct nfp_flower_merge_check *merge,
                                     u8 *last_act_id, int *act_out)
{
        struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
        struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
        struct nfp_fl_set_ip4_addrs *ipv4_add;
        struct nfp_fl_set_ipv6_addr *ipv6_add;
        struct nfp_fl_push_vlan *push_vlan;
        struct nfp_fl_pre_tunnel *pre_tun;
        struct nfp_fl_set_tport *tport;
        struct nfp_fl_set_eth *eth;
        struct nfp_fl_act_head *a;
        unsigned int act_off = 0;
        bool ipv6_tun = false;
        u8 act_id = 0;
        u8 *ports;
        int i;

        while (act_off < flow->meta.act_len) {
                a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
                act_id = a->jump_id;

                switch (act_id) {
                case NFP_FL_ACTION_OPCODE_OUTPUT:
                        if (act_out)
                                (*act_out)++;
                        break;
                case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
                        push_vlan = (struct nfp_fl_push_vlan *)a;
                        if (push_vlan->vlan_tci)
                                merge->tci = cpu_to_be16(0xffff);
                        break;
                case NFP_FL_ACTION_OPCODE_POP_VLAN:
                        merge->tci = cpu_to_be16(0);
                        break;
                case NFP_FL_ACTION_OPCODE_SET_TUNNEL:
                        /* New tunnel header means l2 to l4 can be matched. */
                        eth_broadcast_addr(&merge->l2.mac_dst[0]);
                        eth_broadcast_addr(&merge->l2.mac_src[0]);
                        memset(&merge->l4, 0xff,
                               sizeof(struct nfp_flower_tp_ports));
                        if (ipv6_tun)
                                memset(&merge->ipv6, 0xff,
                                       sizeof(struct nfp_flower_ipv6));
                        else
                                memset(&merge->ipv4, 0xff,
                                       sizeof(struct nfp_flower_ipv4));
                        break;
                case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
                        eth = (struct nfp_fl_set_eth *)a;
                        for (i = 0; i < ETH_ALEN; i++)
                                merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
                        for (i = 0; i < ETH_ALEN; i++)
                                merge->l2.mac_src[i] |=
                                        eth->eth_addr_mask[ETH_ALEN + i];
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
                        ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
                        merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
                        merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
                        ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
                        merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
                        merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
                        ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
                        for (i = 0; i < 4; i++)
                                merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
                                        ipv6_add->ipv6[i].mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
                        ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
                        for (i = 0; i < 4; i++)
                                merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
                                        ipv6_add->ipv6[i].mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
                        ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
                        merge->ipv6.ip_ext.ttl |=
                                ipv6_tc_hl_fl->ipv6_hop_limit_mask;
                        merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
                        merge->ipv6.ipv6_flow_label_exthdr |=
                                ipv6_tc_hl_fl->ipv6_label_mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_UDP:
                case NFP_FL_ACTION_OPCODE_SET_TCP:
                        tport = (struct nfp_fl_set_tport *)a;
                        ports = (u8 *)&merge->l4.port_src;
                        for (i = 0; i < 4; i++)
                                ports[i] |= tport->tp_port_mask[i];
                        break;
                case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
                        pre_tun = (struct nfp_fl_pre_tunnel *)a;
                        ipv6_tun = be16_to_cpu(pre_tun->flags) &
                                        NFP_FL_PRE_TUN_IPV6;
                        break;
                case NFP_FL_ACTION_OPCODE_PRE_LAG:
                case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
                        break;
                default:
                        return -EOPNOTSUPP;
                }

                act_off += a->len_lw << NFP_FL_LW_SIZ;
        }

        if (last_act_id)
                *last_act_id = act_id;

        return 0;
}

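/* Seed @merge with the mask data of @flow, walking the key layers in
 * the order they are laid out in the mask buffer.  Layers outside
 * NFP_FLOWER_MERGE_FIELDS are only tolerated when @extra_fields is set
 * (i.e. for the first sub_flow).
 */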
static int
nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
                                struct nfp_flower_merge_check *merge,
                                bool extra_fields)
{
        struct nfp_flower_meta_tci *meta_tci;
        u8 *mask = flow->mask_data;
        u8 key_layer, match_size;

        memset(merge, 0, sizeof(struct nfp_flower_merge_check));

        meta_tci = (struct nfp_flower_meta_tci *)mask;
        key_layer = meta_tci->nfp_flow_key_layer;

        if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
                return -EOPNOTSUPP;

        merge->tci = meta_tci->tci;
        mask += sizeof(struct nfp_flower_meta_tci);

        if (key_layer & NFP_FLOWER_LAYER_EXT_META)
                mask += sizeof(struct nfp_flower_ext_meta);

        mask += sizeof(struct nfp_flower_in_port);

        if (key_layer & NFP_FLOWER_LAYER_MAC) {
                match_size = sizeof(struct nfp_flower_mac_mpls);
                memcpy(&merge->l2, mask, match_size);
                mask += match_size;
        }

        if (key_layer & NFP_FLOWER_LAYER_TP) {
                match_size = sizeof(struct nfp_flower_tp_ports);
                memcpy(&merge->l4, mask, match_size);
                mask += match_size;
        }

        if (key_layer & NFP_FLOWER_LAYER_IPV4) {
                match_size = sizeof(struct nfp_flower_ipv4);
                memcpy(&merge->ipv4, mask, match_size);
        }

        if (key_layer & NFP_FLOWER_LAYER_IPV6) {
                match_size = sizeof(struct nfp_flower_ipv6);
                memcpy(&merge->ipv6, mask, match_size);
        }

        return 0;
}

static int
nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
                     struct nfp_fl_payload *sub_flow2)
{
        /* Two flows can be merged if sub_flow2 only matches on bits that are
         * either matched by sub_flow1 or set by a sub_flow1 action. This
         * ensures that every packet that hits sub_flow1 and recirculates is
         * guaranteed to hit sub_flow2.
         */
        struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
        int err, act_out = 0;
        u8 last_act_id = 0;

        err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
                                              true);
        if (err)
                return err;

        err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
                                              false);
        if (err)
                return err;

        err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
                                                   &last_act_id, &act_out);
        if (err)
                return err;

        /* Must only be 1 output action and it must be the last in sequence. */
        if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
                return -EOPNOTSUPP;

        /* Reject merge if sub_flow2 matches on something that is not matched
         * on or set in an action by sub_flow1.
         */
        err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
                            sub_flow1_merge.vals,
                            sizeof(struct nfp_flower_merge_check) * 8);
        if (err)
                return -EINVAL;

        return 0;
}

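/* Copy the leading pre-actions (pre-tunnel, pre-LAG) from @act_src to
 * @act_dst, stopping at the first other action.  Returns the number of
 * bytes copied and sets *tunnel_act if a pre-tunnel action was seen.
 */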
static unsigned int
nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
                            bool *tunnel_act)
{
        unsigned int act_off = 0, act_len;
        struct nfp_fl_act_head *a;
        u8 act_id = 0;

        while (act_off < len) {
                a = (struct nfp_fl_act_head *)&act_src[act_off];
                act_len = a->len_lw << NFP_FL_LW_SIZ;
                act_id = a->jump_id;

                switch (act_id) {
                case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
                        if (tunnel_act)
                                *tunnel_act = true;
                        fallthrough;
                case NFP_FL_ACTION_OPCODE_PRE_LAG:
                        memcpy(act_dst + act_off, act_src + act_off, act_len);
                        break;
                default:
                        return act_off;
                }

                act_off += act_len;
        }

        return act_off;
}

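/* After a tunnel push, only an optional leading VLAN push followed by
 * output actions constitutes a valid remainder of the action list;
 * anything else makes the merge unoffloadable.
 */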
static int
nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
{
        struct nfp_fl_act_head *a;
        unsigned int act_off = 0;

        while (act_off < len) {
                a = (struct nfp_fl_act_head *)&acts[act_off];

                if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off)
                        *vlan = (struct nfp_fl_push_vlan *)a;
                else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
                        return -EOPNOTSUPP;

                act_off += a->len_lw << NFP_FL_LW_SIZ;
        }

        /* Ensure any VLAN push also has an egress action. */
        if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan))
                return -EOPNOTSUPP;

        return 0;
}

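/* Fold a deferred VLAN push into the first tunnel set action in @acts
 * by filling in its outer VLAN TPID/TCI fields.  Fails if no tunnel
 * action is present.
 */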
static int
nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
{
        struct nfp_fl_set_tun *tun;
        struct nfp_fl_act_head *a;
        unsigned int act_off = 0;

        while (act_off < len) {
                a = (struct nfp_fl_act_head *)&acts[act_off];

                if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_TUNNEL) {
                        tun = (struct nfp_fl_set_tun *)a;
                        tun->outer_vlan_tpid = vlan->vlan_tpid;
                        tun->outer_vlan_tci = vlan->vlan_tci;

                        return 0;
                }

                act_off += a->len_lw << NFP_FL_LW_SIZ;
        }

        /* Return error if no tunnel action is found. */
        return -EOPNOTSUPP;
}

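/* Build the merged action list: pre-actions of both sub_flows first,
 * then sub_flow1's actions minus its final output, then sub_flow2's.
 * If sub_flow1 pushes a tunnel, a leading VLAN push in sub_flow2 is
 * folded into the tunnel set action rather than copied.
 */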
static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
                        struct nfp_fl_payload *sub_flow2,
                        struct nfp_fl_payload *merge_flow)
{
        unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
        struct nfp_fl_push_vlan *post_tun_push_vlan = NULL;
        bool tunnel_act = false;
        char *merge_act;
        int err;

        /* The last action of sub_flow1 must be output - do not merge this. */
        sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
        sub2_act_len = sub_flow2->meta.act_len;

        if (!sub2_act_len)
                return -EINVAL;

        if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
                return -EINVAL;

        /* A shortcut can only be applied if there is a single action. */
        if (sub1_act_len)
                merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
        else
                merge_flow->meta.shortcut = sub_flow2->meta.shortcut;

        merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
        merge_act = merge_flow->action_data;

        /* Copy any pre-actions to the start of merge flow action list. */
        pre_off1 = nfp_flower_copy_pre_actions(merge_act,
                                               sub_flow1->action_data,
                                               sub1_act_len, &tunnel_act);
        merge_act += pre_off1;
        sub1_act_len -= pre_off1;
        pre_off2 = nfp_flower_copy_pre_actions(merge_act,
                                               sub_flow2->action_data,
                                               sub2_act_len, NULL);
        merge_act += pre_off2;
        sub2_act_len -= pre_off2;

        /* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
         * a tunnel, there are restrictions on what sub_flow 2 actions lead to a
         * valid merge.
         */
        if (tunnel_act) {
                char *post_tun_acts = &sub_flow2->action_data[pre_off2];

                err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len,
                                                  &post_tun_push_vlan);
                if (err)
                        return err;

                if (post_tun_push_vlan) {
                        pre_off2 += sizeof(*post_tun_push_vlan);
                        sub2_act_len -= sizeof(*post_tun_push_vlan);
                }
        }

        /* Copy remaining actions from sub_flows 1 and 2. */
        memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);

        if (post_tun_push_vlan) {
                /* Update tunnel action in merge to include VLAN push. */
                err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len,
                                                 post_tun_push_vlan);
                if (err)
                        return err;

                merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan);
        }

        merge_act += sub1_act_len;
        memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);

        return 0;
}

/* Flow link code should only be accessed under RTNL. */
static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
{
        list_del(&link->merge_flow.list);
        list_del(&link->sub_flow.list);
        kfree(link);
}

static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
                                    struct nfp_fl_payload *sub_flow)
{
        struct nfp_fl_payload_link *link;

        list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
                if (link->sub_flow.flow == sub_flow) {
                        nfp_flower_unlink_flow(link);
                        return;
                }
}

static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
                                 struct nfp_fl_payload *sub_flow)
{
        struct nfp_fl_payload_link *link;

        link = kmalloc(sizeof(*link), GFP_KERNEL);
        if (!link)
                return -ENOMEM;

        link->merge_flow.flow = merge_flow;
        list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
        link->sub_flow.flow = sub_flow;
        list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);

        return 0;
}

/**
 * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows to a single flow.
 * @app:        Pointer to the APP handle
 * @sub_flow1:  Initial flow matched to produce merge hint
 * @sub_flow2:  Post recirculation flow matched in merge hint
 *
 * Combines 2 flows (if valid) to a single flow, removing the initial from hw
 * and offloading the new, merged flow.
 *
 * Return: negative value on error, 0 on success.
 */
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
                                     struct nfp_fl_payload *sub_flow1,
                                     struct nfp_fl_payload *sub_flow2)
{
        struct flow_cls_offload merge_tc_off;
        struct nfp_flower_priv *priv = app->priv;
        struct netlink_ext_ack *extack = NULL;
        struct nfp_fl_payload *merge_flow;
        struct nfp_fl_key_ls merge_key_ls;
        int err;

        ASSERT_RTNL();

        extack = merge_tc_off.common.extack;
        if (sub_flow1 == sub_flow2 ||
            nfp_flower_is_merge_flow(sub_flow1) ||
            nfp_flower_is_merge_flow(sub_flow2))
                return -EINVAL;

        err = nfp_flower_can_merge(sub_flow1, sub_flow2);
        if (err)
                return err;

        merge_key_ls.key_size = sub_flow1->meta.key_len;

        merge_flow = nfp_flower_allocate_new(&merge_key_ls);
        if (!merge_flow)
                return -ENOMEM;

        merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
        merge_flow->ingress_dev = sub_flow1->ingress_dev;

        memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
               sub_flow1->meta.key_len);
        memcpy(merge_flow->mask_data, sub_flow1->mask_data,
               sub_flow1->meta.mask_len);

        err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
        if (err)
                goto err_destroy_merge_flow;

        err = nfp_flower_link_flows(merge_flow, sub_flow1);
        if (err)
                goto err_destroy_merge_flow;

        err = nfp_flower_link_flows(merge_flow, sub_flow2);
        if (err)
                goto err_unlink_sub_flow1;

        merge_tc_off.cookie = merge_flow->tc_flower_cookie;
        err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
                                        merge_flow->ingress_dev, extack);
        if (err)
                goto err_unlink_sub_flow2;

        err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
                                     nfp_flower_table_params);
        if (err)
                goto err_release_metadata;

        err = nfp_flower_xmit_flow(app, merge_flow,
                                   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
        if (err)
                goto err_remove_rhash;

        merge_flow->in_hw = true;
        sub_flow1->in_hw = false;

        return 0;

err_remove_rhash:
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
                                            &merge_flow->fl_node,
                                            nfp_flower_table_params));
err_release_metadata:
        nfp_modify_flow_metadata(app, merge_flow);
err_unlink_sub_flow2:
        nfp_flower_unlink_flows(merge_flow, sub_flow2);
err_unlink_sub_flow1:
        nfp_flower_unlink_flows(merge_flow, sub_flow1);
err_destroy_merge_flow:
        kfree(merge_flow->action_data);
        kfree(merge_flow->mask_data);
        kfree(merge_flow->unmasked_data);
        kfree(merge_flow);
        return err;
}

/**
 * nfp_flower_validate_pre_tun_rule() - Verify a flow as a pre-tunnel rule.
 * @app:        Pointer to the APP handle
 * @flow:       Pointer to NFP flow representation of rule
 * @extack:     Netlink extended ACK report
 *
 * Verifies the flow as a pre-tunnel rule.
 *
 * Return: negative value on error, 0 if verified.
 */
static int
nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
                                 struct nfp_fl_payload *flow,
                                 struct netlink_ext_ack *extack)
{
        struct nfp_flower_meta_tci *meta_tci;
        struct nfp_flower_mac_mpls *mac;
        struct nfp_fl_act_head *act;
        u8 *mask = flow->mask_data;
        bool vlan = false;
        int act_offset;
        u8 key_layer;

        meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;
        if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
                u16 vlan_tci = be16_to_cpu(meta_tci->tci);

                vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
                flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
                vlan = true;
        } else {
                flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
        }

        key_layer = meta_tci->nfp_flow_key_layer;
        if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
                return -EOPNOTSUPP;
        }

        if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required");
                return -EOPNOTSUPP;
        }

        /* Skip fields known to exist. */
        mask += sizeof(struct nfp_flower_meta_tci);
        mask += sizeof(struct nfp_flower_in_port);

        /* Ensure destination MAC address is fully matched. */
        mac = (struct nfp_flower_mac_mpls *)mask;
        if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked");
                return -EOPNOTSUPP;
        }

        if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
            key_layer & NFP_FLOWER_LAYER_IPV6) {
                /* Flags and proto fields have same offset in IPv4 and IPv6. */
                int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
                int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
                int size;
                int i;

                size = key_layer & NFP_FLOWER_LAYER_IPV4 ?
                        sizeof(struct nfp_flower_ipv4) :
                        sizeof(struct nfp_flower_ipv6);

                mask += sizeof(struct nfp_flower_mac_mpls);

                /* Ensure proto and flags are the only IP layer fields. */
                for (i = 0; i < size; i++)
                        if (mask[i] && i != ip_flags && i != ip_proto) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
                                return -EOPNOTSUPP;
                        }
        }

        /* Action must be a single egress or pop_vlan and egress. */
        act_offset = 0;
        act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
        if (vlan) {
                if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action");
                        return -EOPNOTSUPP;
                }

                act_offset += act->len_lw << NFP_FL_LW_SIZ;
                act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
        }

        if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected");
                return -EOPNOTSUPP;
        }

        act_offset += act->len_lw << NFP_FL_LW_SIZ;

        /* Ensure there are no more actions after egress. */
        if (act_offset != flow->meta.act_len) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action");
                return -EOPNOTSUPP;
        }

        return 0;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:        Pointer to the APP handle
 * @netdev:     netdev structure.
 * @flow:       TC flower classifier offload structure.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
                       struct flow_cls_offload *flow)
{
        enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
        struct nfp_flower_priv *priv = app->priv;
        struct netlink_ext_ack *extack = NULL;
        struct nfp_fl_payload *flow_pay;
        struct nfp_fl_key_ls *key_layer;
        struct nfp_port *port = NULL;
        int err;

        extack = flow->common.extack;
        if (nfp_netdev_is_nfp_repr(netdev))
                port = nfp_port_from_netdev(netdev);

        key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
        if (!key_layer)
                return -ENOMEM;

        err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
                                              &tun_type, extack);
        if (err)
                goto err_free_key_ls;

        flow_pay = nfp_flower_allocate_new(key_layer);
        if (!flow_pay) {
                err = -ENOMEM;
                goto err_free_key_ls;
        }

        err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
                                            flow_pay, tun_type, extack);
        if (err)
                goto err_destroy_flow;

        err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack);
        if (err)
                goto err_destroy_flow;

        if (flow_pay->pre_tun_rule.dev) {
                err = nfp_flower_validate_pre_tun_rule(app, flow_pay, extack);
                if (err)
                        goto err_destroy_flow;
        }

        err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
        if (err)
                goto err_destroy_flow;

        flow_pay->tc_flower_cookie = flow->cookie;
        err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
                                     nfp_flower_table_params);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
                goto err_release_metadata;
        }

        if (flow_pay->pre_tun_rule.dev)
                err = nfp_flower_xmit_pre_tun_flow(app, flow_pay);
        else
                err = nfp_flower_xmit_flow(app, flow_pay,
                                           NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
        if (err)
                goto err_remove_rhash;

        if (port)
                port->tc_offload_cnt++;

        flow_pay->in_hw = true;

        /* Deallocate flow payload when flower rule has been destroyed. */
        kfree(key_layer);

        return 0;

err_remove_rhash:
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
                                            &flow_pay->fl_node,
                                            nfp_flower_table_params));
err_release_metadata:
        nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
        if (flow_pay->nfp_tun_ipv6)
                nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);
        kfree(flow_pay->action_data);
        kfree(flow_pay->mask_data);
        kfree(flow_pay->unmasked_data);
        kfree(flow_pay);
err_free_key_ls:
        kfree(key_layer);
        return err;
}

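/* Tear down a merge flow because sub_flow @del_sub_flow is going away.
 * If the other (original) sub_flow still exists, its rule is re-sent to
 * firmware as a modification; otherwise the merge flow is deleted from
 * hw.  All links and the merge flow itself are then freed.
 */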
1276 static void
1277 nfp_flower_remove_merge_flow(struct nfp_app *app,
1278                              struct nfp_fl_payload *del_sub_flow,
1279                              struct nfp_fl_payload *merge_flow)
1280 {
1281         struct nfp_flower_priv *priv = app->priv;
1282         struct nfp_fl_payload_link *link, *temp;
1283         struct nfp_fl_payload *origin;
1284         bool mod = false;
1285         int err;
1286
1287         link = list_first_entry(&merge_flow->linked_flows,
1288                                 struct nfp_fl_payload_link, merge_flow.list);
1289         origin = link->sub_flow.flow;
1290
1291         /* Re-add rule the merge had overwritten if it has not been deleted. */
1292         if (origin != del_sub_flow)
1293                 mod = true;
1294
1295         err = nfp_modify_flow_metadata(app, merge_flow);
1296         if (err) {
1297                 nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
1298                 goto err_free_links;
1299         }
1300
1301         if (!mod) {
1302                 err = nfp_flower_xmit_flow(app, merge_flow,
1303                                            NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
1304                 if (err) {
1305                         nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
1306                         goto err_free_links;
1307                 }
1308         } else {
1309                 __nfp_modify_flow_metadata(priv, origin);
1310                 err = nfp_flower_xmit_flow(app, origin,
1311                                            NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
1312                 if (err)
1313                         nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
1314                 origin->in_hw = true;
1315         }
1316
1317 err_free_links:
1318         /* Clean any links connected with the merged flow. */
1319         list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
1320                                  merge_flow.list)
1321                 nfp_flower_unlink_flow(link);
1322
1323         kfree(merge_flow->action_data);
1324         kfree(merge_flow->mask_data);
1325         kfree(merge_flow->unmasked_data);
1326         WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
1327                                             &merge_flow->fl_node,
1328                                             nfp_flower_table_params));
1329         kfree_rcu(merge_flow, rcu);
1330 }
1331
1332 static void
1333 nfp_flower_del_linked_merge_flows(struct nfp_app *app,
1334                                   struct nfp_fl_payload *sub_flow)
1335 {
1336         struct nfp_fl_payload_link *link, *temp;
1337
1338         /* Remove any merge flow formed from the deleted sub_flow. */
1339         list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
1340                                  sub_flow.list)
1341                 nfp_flower_remove_merge_flow(app, sub_flow,
1342                                              link->merge_flow.flow);
1343 }
1344
1345 /**
1346  * nfp_flower_del_offload() - Removes a flow from hardware.
1347  * @app:        Pointer to the APP handle
1348  * @netdev:     netdev structure.
1349  * @flow:       TC flower classifier offload structure
1350  *
1351  * Removes a flow from the rhashtable and clears the
1352  * action payload. Any flows merged from this are also deleted.
1353  *
1354  * Return: negative value on error, 0 if removed successfully.
1355  */
1356 static int
1357 nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
1358                        struct flow_cls_offload *flow)
1359 {
1360         struct nfp_flower_priv *priv = app->priv;
1361         struct netlink_ext_ack *extack = NULL;
1362         struct nfp_fl_payload *nfp_flow;
1363         struct nfp_port *port = NULL;
1364         int err;
1365
1366         extack = flow->common.extack;
1367         if (nfp_netdev_is_nfp_repr(netdev))
1368                 port = nfp_port_from_netdev(netdev);
1369
1370         nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
1371         if (!nfp_flow) {
1372                 NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
1373                 return -ENOENT;
1374         }
1375
1376         err = nfp_modify_flow_metadata(app, nfp_flow);
1377         if (err)
1378                 goto err_free_merge_flow;
1379
1380         if (nfp_flow->nfp_tun_ipv4_addr)
1381                 nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
1382
1383         if (nfp_flow->nfp_tun_ipv6)
1384                 nfp_tunnel_put_ipv6_off(app, nfp_flow->nfp_tun_ipv6);
1385
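             /* Nothing to remove from firmware if the rule was never
              * programmed there.
              */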
1386         if (!nfp_flow->in_hw) {
1387                 err = 0;
1388                 goto err_free_merge_flow;
1389         }
1390
1391         if (nfp_flow->pre_tun_rule.dev)
1392                 err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow);
1393         else
1394                 err = nfp_flower_xmit_flow(app, nfp_flow,
1395                                            NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
1396         /* Fall through on error. */
1397
1398 err_free_merge_flow:
1399         nfp_flower_del_linked_merge_flows(app, nfp_flow);
1400         if (port)
1401                 port->tc_offload_cnt--;
1402         kfree(nfp_flow->action_data);
1403         kfree(nfp_flow->mask_data);
1404         kfree(nfp_flow->unmasked_data);
1405         WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
1406                                             &nfp_flow->fl_node,
1407                                             nfp_flower_table_params));
1408         kfree_rcu(nfp_flow, rcu);
1409         return err;
1410 }
1411
1412 static void
1413 __nfp_flower_update_merge_stats(struct nfp_app *app,
1414                                 struct nfp_fl_payload *merge_flow)
1415 {
1416         struct nfp_flower_priv *priv = app->priv;
1417         struct nfp_fl_payload_link *link;
1418         struct nfp_fl_payload *sub_flow;
1419         u64 pkts, bytes, used;
1420         u32 ctx_id;
1421
1422         ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
1423         pkts = priv->stats[ctx_id].pkts;
1424         /* Do not cycle subflows if no stats to distribute. */
1425         if (!pkts)
1426                 return;
1427         bytes = priv->stats[ctx_id].bytes;
1428         used = priv->stats[ctx_id].used;
1429
1430         /* Reset stats for the merge flow. */
1431         priv->stats[ctx_id].pkts = 0;
1432         priv->stats[ctx_id].bytes = 0;
1433
1434         /* The merge flow has received stats updates from firmware.
1435          * Distribute these stats to all subflows that form the merge.
1436          * The stats will then be collected by TC via the subflows.
1437          */
1438         list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
1439                 sub_flow = link->sub_flow.flow;
1440                 ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
1441                 priv->stats[ctx_id].pkts += pkts;
1442                 priv->stats[ctx_id].bytes += bytes;
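                     /* 'used' is a last-use timestamp, so keep the most
                      * recent value rather than accumulating it.
                      */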
1443                 priv->stats[ctx_id].used = max_t(u64, used,
1444                                                  priv->stats[ctx_id].used);
1445         }
1446 }
1447
1448 static void
1449 nfp_flower_update_merge_stats(struct nfp_app *app,
1450                               struct nfp_fl_payload *sub_flow)
1451 {
1452         struct nfp_fl_payload_link *link;
1453
1454         /* Distribute stats from each merge flow this subflow helps to form. */
1455         list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
1456                 __nfp_flower_update_merge_stats(app, link->merge_flow.flow);
1457 }
1458
1459 /**
1460  * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
1461  * @app:        Pointer to the APP handle
1462  * @netdev:     netdev structure.
1463  * @flow:       TC flower classifier offload structure
1464  *
1465  * Populates a flow statistics structure which corresponds to a
1466  * specific flow.
1467  *
1468  * Return: negative value on error, 0 if stats populated successfully.
1469  */
1470 static int
1471 nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
1472                      struct flow_cls_offload *flow)
1473 {
1474         struct nfp_flower_priv *priv = app->priv;
1475         struct netlink_ext_ack *extack = NULL;
1476         struct nfp_fl_payload *nfp_flow;
1477         u32 ctx_id;
1478
1479         extack = flow->common.extack;
1480         nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
1481         if (!nfp_flow) {
1482                 NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
1483                 return -EINVAL;
1484         }
1485
1486         ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
1487
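             /* Stats are deltas: report the accumulated counts to TC,
              * then zero them under the stats lock.
              */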
1488         spin_lock_bh(&priv->stats_lock);
1489         /* If request is for a sub_flow, update stats from merged flows. */
1490         if (!list_empty(&nfp_flow->linked_flows))
1491                 nfp_flower_update_merge_stats(app, nfp_flow);
1492
1493         flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
1494                           priv->stats[ctx_id].pkts, 0, priv->stats[ctx_id].used,
1495                           FLOW_ACTION_HW_STATS_DELAYED);
1496
1497         priv->stats[ctx_id].pkts = 0;
1498         priv->stats[ctx_id].bytes = 0;
1499         spin_unlock_bh(&priv->stats_lock);
1500
1501         return 0;
1502 }
1503
1504 static int
1505 nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
1506                         struct flow_cls_offload *flower)
1507 {
1508         if (!eth_proto_is_802_3(flower->common.protocol))
1509                 return -EOPNOTSUPP;
1510
1511         switch (flower->command) {
1512         case FLOW_CLS_REPLACE:
1513                 return nfp_flower_add_offload(app, netdev, flower);
1514         case FLOW_CLS_DESTROY:
1515                 return nfp_flower_del_offload(app, netdev, flower);
1516         case FLOW_CLS_STATS:
1517                 return nfp_flower_get_stats(app, netdev, flower);
1518         default:
1519                 return -EOPNOTSUPP;
1520         }
1521 }
1522
1523 static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
1524                                         void *type_data, void *cb_priv)
1525 {
1526         struct nfp_repr *repr = cb_priv;
1527
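             /* Reject requests that cannot be offloaded or that are not
              * on chain 0.
              */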
1528         if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
1529                 return -EOPNOTSUPP;
1530
1531         switch (type) {
1532         case TC_SETUP_CLSFLOWER:
1533                 return nfp_flower_repr_offload(repr->app, repr->netdev,
1534                                                type_data);
1535         case TC_SETUP_CLSMATCHALL:
1536                 return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
1537                                                     type_data);
1538         default:
1539                 return -EOPNOTSUPP;
1540         }
1541 }
1542
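     /* Driver-wide list of bound flow blocks; flow_block_cb_is_busy()
      * checks it to reject duplicate bindings of the same callback.
      */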
1543 static LIST_HEAD(nfp_block_cb_list);
1544
1545 static int nfp_flower_setup_tc_block(struct net_device *netdev,
1546                                      struct flow_block_offload *f)
1547 {
1548         struct nfp_repr *repr = netdev_priv(netdev);
1549         struct nfp_flower_repr_priv *repr_priv;
1550         struct flow_block_cb *block_cb;
1551
1552         if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1553                 return -EOPNOTSUPP;
1554
1555         repr_priv = repr->app_priv;
1556         repr_priv->block_shared = f->block_shared;
1557         f->driver_block_list = &nfp_block_cb_list;
1558
1559         switch (f->command) {
1560         case FLOW_BLOCK_BIND:
1561                 if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
1562                                           &nfp_block_cb_list))
1563                         return -EBUSY;
1564
1565                 block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
1566                                                repr, repr, NULL);
1567                 if (IS_ERR(block_cb))
1568                         return PTR_ERR(block_cb);
1569
1570                 flow_block_cb_add(block_cb, f);
1571                 list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
1572                 return 0;
1573         case FLOW_BLOCK_UNBIND:
1574                 block_cb = flow_block_cb_lookup(f->block,
1575                                                 nfp_flower_setup_tc_block_cb,
1576                                                 repr);
1577                 if (!block_cb)
1578                         return -ENOENT;
1579
1580                 flow_block_cb_remove(block_cb, f);
1581                 list_del(&block_cb->driver_list);
1582                 return 0;
1583         default:
1584                 return -EOPNOTSUPP;
1585         }
1586 }
1587
1588 int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
1589                         enum tc_setup_type type, void *type_data)
1590 {
1591         switch (type) {
1592         case TC_SETUP_BLOCK:
1593                 return nfp_flower_setup_tc_block(netdev, type_data);
1594         default:
1595                 return -EOPNOTSUPP;
1596         }
1597 }
1598
1599 struct nfp_flower_indr_block_cb_priv {
1600         struct net_device *netdev;
1601         struct nfp_app *app;
1602         struct list_head list;
1603 };
1604
1605 static struct nfp_flower_indr_block_cb_priv *
1606 nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
1607                                      struct net_device *netdev)
1608 {
1609         struct nfp_flower_indr_block_cb_priv *cb_priv;
1610         struct nfp_flower_priv *priv = app->priv;
1611
1612         /* All callback list access should be protected by RTNL. */
1613         ASSERT_RTNL();
1614
1615         list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
1616                 if (cb_priv->netdev == netdev)
1617                         return cb_priv;
1618
1619         return NULL;
1620 }
1621
1622 static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
1623                                           void *type_data, void *cb_priv)
1624 {
1625         struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
1626         struct flow_cls_offload *flower = type_data;
1627
1628         if (flower->common.chain_index)
1629                 return -EOPNOTSUPP;
1630
1631         switch (type) {
1632         case TC_SETUP_CLSFLOWER:
1633                 return nfp_flower_repr_offload(priv->app, priv->netdev,
1634                                                type_data);
1635         default:
1636                 return -EOPNOTSUPP;
1637         }
1638 }
1639
1640 void nfp_flower_setup_indr_tc_release(void *cb_priv)
1641 {
1642         struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
1643
1644         list_del(&priv->list);
1645         kfree(priv);
1646 }
1647
1648 static int
1649 nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, struct nfp_app *app,
1650                                struct flow_block_offload *f, void *data,
1651                                void (*cleanup)(struct flow_block_cb *block_cb))
1652 {
1653         struct nfp_flower_indr_block_cb_priv *cb_priv;
1654         struct nfp_flower_priv *priv = app->priv;
1655         struct flow_block_cb *block_cb;
1656
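             /* Non-internal ports are offloaded on ingress blocks only;
              * internal ports on egress blocks only.
              */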
1657         if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1658              !nfp_flower_internal_port_can_offload(app, netdev)) ||
1659             (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
1660              nfp_flower_internal_port_can_offload(app, netdev)))
1661                 return -EOPNOTSUPP;
1662
1663         switch (f->command) {
1664         case FLOW_BLOCK_BIND:
1665                 cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
1666                 if (cb_priv &&
1667                     flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
1668                                           cb_priv,
1669                                           &nfp_block_cb_list))
1670                         return -EBUSY;
1671
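                     /* cb_priv is released by nfp_flower_setup_indr_tc_release()
                      * when the block callback is finally freed.
                      */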
1672                 cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
1673                 if (!cb_priv)
1674                         return -ENOMEM;
1675
1676                 cb_priv->netdev = netdev;
1677                 cb_priv->app = app;
1678                 list_add(&cb_priv->list, &priv->indr_block_cb_priv);
1679
1680                 block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb,
1681                                                     cb_priv, cb_priv,
1682                                                     nfp_flower_setup_indr_tc_release,
1683                                                     f, netdev, sch, data, app, cleanup);
1684                 if (IS_ERR(block_cb)) {
1685                         list_del(&cb_priv->list);
1686                         kfree(cb_priv);
1687                         return PTR_ERR(block_cb);
1688                 }
1689
1690                 flow_block_cb_add(block_cb, f);
1691                 list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
1692                 return 0;
1693         case FLOW_BLOCK_UNBIND:
1694                 cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
1695                 if (!cb_priv)
1696                         return -ENOENT;
1697
1698                 block_cb = flow_block_cb_lookup(f->block,
1699                                                 nfp_flower_setup_indr_block_cb,
1700                                                 cb_priv);
1701                 if (!block_cb)
1702                         return -ENOENT;
1703
1704                 flow_indr_block_cb_remove(block_cb, f);
1705                 list_del(&block_cb->driver_list);
1706                 return 0;
1707         default:
1708                 return -EOPNOTSUPP;
1709         }
1711 }
1712
1713 int
1714 nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
1715                             enum tc_setup_type type, void *type_data,
1716                             void *data,
1717                             void (*cleanup)(struct flow_block_cb *block_cb))
1718 {
1719         if (!nfp_fl_is_netdev_to_offload(netdev))
1720                 return -EOPNOTSUPP;
1721
1722         switch (type) {
1723         case TC_SETUP_BLOCK:
1724                 return nfp_flower_setup_indr_tc_block(netdev, sch, cb_priv,
1725                                                       type_data, data, cleanup);
1726         default:
1727                 return -EOPNOTSUPP;
1728         }
1729 }