[linux-2.6-microblaze.git] drivers/net/ethernet/netronome/nfp/flower/offload.c
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
3
4 #include <linux/skbuff.h>
5 #include <net/devlink.h>
6 #include <net/pkt_cls.h>
7
8 #include "cmsg.h"
9 #include "main.h"
10 #include "../nfpcore/nfp_cpp.h"
11 #include "../nfpcore/nfp_nsp.h"
12 #include "../nfp_app.h"
13 #include "../nfp_main.h"
14 #include "../nfp_net.h"
15 #include "../nfp_port.h"
16
17 #define NFP_FLOWER_SUPPORTED_TCPFLAGS \
18         (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
19          TCPHDR_PSH | TCPHDR_URG)
20
21 #define NFP_FLOWER_SUPPORTED_CTLFLAGS \
22         (FLOW_DIS_IS_FRAGMENT | \
23          FLOW_DIS_FIRST_FRAG)
24
25 #define NFP_FLOWER_WHITELIST_DISSECTOR \
26         (BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
27          BIT(FLOW_DISSECTOR_KEY_BASIC) | \
28          BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
29          BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
30          BIT(FLOW_DISSECTOR_KEY_TCP) | \
31          BIT(FLOW_DISSECTOR_KEY_PORTS) | \
32          BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
33          BIT(FLOW_DISSECTOR_KEY_VLAN) | \
34          BIT(FLOW_DISSECTOR_KEY_CVLAN) | \
35          BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
36          BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
37          BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
38          BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
39          BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
40          BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
41          BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
42          BIT(FLOW_DISSECTOR_KEY_MPLS) | \
43          BIT(FLOW_DISSECTOR_KEY_IP))
44
45 #define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
46         (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
47          BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
48          BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
49          BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
50          BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
51          BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
52          BIT(FLOW_DISSECTOR_KEY_ENC_IP))
53
54 #define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
55         (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
56          BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))
57
58 #define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R \
59         (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
60          BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS))
61
62 #define NFP_FLOWER_MERGE_FIELDS \
63         (NFP_FLOWER_LAYER_PORT | \
64          NFP_FLOWER_LAYER_MAC | \
65          NFP_FLOWER_LAYER_TP | \
66          NFP_FLOWER_LAYER_IPV4 | \
67          NFP_FLOWER_LAYER_IPV6)
68
69 #define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
70         (NFP_FLOWER_LAYER_EXT_META | \
71          NFP_FLOWER_LAYER_PORT | \
72          NFP_FLOWER_LAYER_MAC | \
73          NFP_FLOWER_LAYER_IPV4 | \
74          NFP_FLOWER_LAYER_IPV6)
75
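/* Aggregates the header fields relevant to flow merging; the vals array
 * aliases the struct so the whole set can be handled as a bitmap.
 */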
76 struct nfp_flower_merge_check {
77         union {
78                 struct {
79                         __be16 tci;
80                         struct nfp_flower_mac_mpls l2;
81                         struct nfp_flower_tp_ports l4;
82                         union {
83                                 struct nfp_flower_ipv4 ipv4;
84                                 struct nfp_flower_ipv6 ipv6;
85                         };
86                 };
87                 unsigned long vals[8];
88         };
89 };
90
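/* Build a control message carrying the rule metadata, match key, mask and
 * action data and hand it to the firmware. The metadata lengths are
 * converted to firmware long-word units for transmission and restored to
 * bytes afterwards.
 */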
91 static int
92 nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
93                      u8 mtype)
94 {
95         u32 meta_len, key_len, mask_len, act_len, tot_len;
96         struct sk_buff *skb;
97         unsigned char *msg;
98
99         meta_len = sizeof(struct nfp_fl_rule_metadata);
100         key_len = nfp_flow->meta.key_len;
101         mask_len = nfp_flow->meta.mask_len;
102         act_len = nfp_flow->meta.act_len;
103
104         tot_len = meta_len + key_len + mask_len + act_len;
105
106         /* Convert to long words as firmware expects
107          * lengths in units of NFP_FL_LW_SIZ.
108          */
109         nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
110         nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
111         nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
112
113         skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
114         if (!skb)
115                 return -ENOMEM;
116
117         msg = nfp_flower_cmsg_get_data(skb);
118         memcpy(msg, &nfp_flow->meta, meta_len);
119         memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
120         memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
121         memcpy(&msg[meta_len + key_len + mask_len],
122                nfp_flow->action_data, act_len);
123
124         /* Convert back to bytes as software expects
125          * lengths in units of bytes.
126          */
127         nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
128         nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
129         nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;
130
131         nfp_ctrl_tx(app->ctrl, skb);
132
133         return 0;
134 }
135
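/* Return true if the rule matches on any field above the MAC layer. */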
136 static bool nfp_flower_check_higher_than_mac(struct flow_cls_offload *f)
137 {
138         struct flow_rule *rule = flow_cls_offload_flow_rule(f);
139
140         return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
141                flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
142                flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
143                flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
144 }
145
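/* Return true if the rule matches on any field above the network layer. */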
146 static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
147 {
148         struct flow_rule *rule = flow_cls_offload_flow_rule(f);
149
150         return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
151                flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
152 }
153
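/* Account for geneve option matches in the key layers, rejecting option
 * lengths the firmware cannot offload.
 */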
154 static int
155 nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
156                           u32 *key_layer_two, int *key_size, bool ipv6,
157                           struct netlink_ext_ack *extack)
158 {
159         if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY ||
160             (ipv6 && enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY_V6)) {
161                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
162                 return -EOPNOTSUPP;
163         }
164
165         if (enc_opts->len > 0) {
166                 *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
167                 *key_size += sizeof(struct nfp_flower_geneve_options);
168         }
169
170         return 0;
171 }
172
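/* Identify the UDP tunnel type (VXLAN or geneve) from the destination port
 * and extend the key layers and key size to cover the tunnel match.
 */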
173 static int
174 nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
175                               struct flow_dissector_key_enc_opts *enc_op,
176                               u32 *key_layer_two, u8 *key_layer, int *key_size,
177                               struct nfp_flower_priv *priv,
178                               enum nfp_flower_tun_type *tun_type, bool ipv6,
179                               struct netlink_ext_ack *extack)
180 {
181         int err;
182
183         switch (enc_ports->dst) {
184         case htons(IANA_VXLAN_UDP_PORT):
185                 *tun_type = NFP_FL_TUNNEL_VXLAN;
186                 *key_layer |= NFP_FLOWER_LAYER_VXLAN;
187
188                 if (ipv6) {
189                         *key_layer |= NFP_FLOWER_LAYER_EXT_META;
190                         *key_size += sizeof(struct nfp_flower_ext_meta);
191                         *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
192                         *key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
193                 } else {
194                         *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
195                 }
196
197                 if (enc_op) {
198                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
199                         return -EOPNOTSUPP;
200                 }
201                 break;
202         case htons(GENEVE_UDP_PORT):
203                 if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
204                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
205                         return -EOPNOTSUPP;
206                 }
207                 *tun_type = NFP_FL_TUNNEL_GENEVE;
208                 *key_layer |= NFP_FLOWER_LAYER_EXT_META;
209                 *key_size += sizeof(struct nfp_flower_ext_meta);
210                 *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
211
212                 if (ipv6) {
213                         *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
214                         *key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
215                 } else {
216                         *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
217                 }
218
219                 if (!enc_op)
220                         break;
221                 if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
222                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
223                         return -EOPNOTSUPP;
224                 }
225                 err = nfp_flower_calc_opt_layer(enc_op, key_layer_two, key_size,
226                                                 ipv6, extack);
227                 if (err)
228                         return err;
229                 break;
230         default:
231                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
232                 return -EOPNOTSUPP;
233         }
234
235         return 0;
236 }
237
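/* Walk the dissector keys used by the classifier rule and work out which
 * NFP key layers and total key size are required, rejecting any match the
 * firmware cannot offload.
 */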
238 static int
239 nfp_flower_calculate_key_layers(struct nfp_app *app,
240                                 struct net_device *netdev,
241                                 struct nfp_fl_key_ls *ret_key_ls,
242                                 struct flow_cls_offload *flow,
243                                 enum nfp_flower_tun_type *tun_type,
244                                 struct netlink_ext_ack *extack)
245 {
246         struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
247         struct flow_dissector *dissector = rule->match.dissector;
248         struct flow_match_basic basic = { NULL, NULL };
249         struct nfp_flower_priv *priv = app->priv;
250         u32 key_layer_two;
251         u8 key_layer;
252         int key_size;
253         int err;
254
255         if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
256                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
257                 return -EOPNOTSUPP;
258         }
259
260         /* If any tun dissector is used then one of the required sets must be present. */
261         if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
262             (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R)
263             != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R &&
264             (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
265             != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
266                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
267                 return -EOPNOTSUPP;
268         }
269
270         key_layer_two = 0;
271         key_layer = NFP_FLOWER_LAYER_PORT;
272         key_size = sizeof(struct nfp_flower_meta_tci) +
273                    sizeof(struct nfp_flower_in_port);
274
275         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
276             flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
277                 key_layer |= NFP_FLOWER_LAYER_MAC;
278                 key_size += sizeof(struct nfp_flower_mac_mpls);
279         }
280
281         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
282                 struct flow_match_vlan vlan;
283
284                 flow_rule_match_vlan(rule, &vlan);
285                 if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
286                     vlan.key->vlan_priority) {
287                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
288                         return -EOPNOTSUPP;
289                 }
290                 if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ &&
291                     !(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
292                         key_layer |= NFP_FLOWER_LAYER_EXT_META;
293                         key_size += sizeof(struct nfp_flower_ext_meta);
294                         key_size += sizeof(struct nfp_flower_vlan);
295                         key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
296                 }
297         }
298
299         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
300                 struct flow_match_vlan cvlan;
301
302                 if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
303                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN QinQ offload");
304                         return -EOPNOTSUPP;
305                 }
306
307                 flow_rule_match_vlan(rule, &cvlan);
308                 if (!(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
309                         key_layer |= NFP_FLOWER_LAYER_EXT_META;
310                         key_size += sizeof(struct nfp_flower_ext_meta);
311                         key_size += sizeof(struct nfp_flower_vlan);
312                         key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
313                 }
314         }
315
316         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
317                 struct flow_match_enc_opts enc_op = { NULL, NULL };
318                 struct flow_match_ipv4_addrs ipv4_addrs;
319                 struct flow_match_ipv6_addrs ipv6_addrs;
320                 struct flow_match_control enc_ctl;
321                 struct flow_match_ports enc_ports;
322                 bool ipv6_tun = false;
323
324                 flow_rule_match_enc_control(rule, &enc_ctl);
325
326                 if (enc_ctl.mask->addr_type != 0xffff) {
327                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
328                         return -EOPNOTSUPP;
329                 }
330
331                 ipv6_tun = enc_ctl.key->addr_type ==
332                                 FLOW_DISSECTOR_KEY_IPV6_ADDRS;
333                 if (ipv6_tun &&
334                     !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) {
335                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: firmware does not support IPv6 tunnels");
336                         return -EOPNOTSUPP;
337                 }
338
339                 if (!ipv6_tun &&
340                     enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
341                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel address type not IPv4 or IPv6");
342                         return -EOPNOTSUPP;
343                 }
344
345                 if (ipv6_tun) {
346                         flow_rule_match_enc_ipv6_addrs(rule, &ipv6_addrs);
347                         if (memchr_inv(&ipv6_addrs.mask->dst, 0xff,
348                                        sizeof(ipv6_addrs.mask->dst))) {
349                                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv6 destination address is supported");
350                                 return -EOPNOTSUPP;
351                         }
352                 } else {
353                         flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
354                         if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
355                                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
356                                 return -EOPNOTSUPP;
357                         }
358                 }
359
360                 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
361                         flow_rule_match_enc_opts(rule, &enc_op);
362
363                 if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
364                         /* check if GRE, which has no enc_ports */
365                         if (!netif_is_gretap(netdev)) {
366                                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
367                                 return -EOPNOTSUPP;
368                         }
369
370                         *tun_type = NFP_FL_TUNNEL_GRE;
371                         key_layer |= NFP_FLOWER_LAYER_EXT_META;
372                         key_size += sizeof(struct nfp_flower_ext_meta);
373                         key_layer_two |= NFP_FLOWER_LAYER2_GRE;
374
375                         if (ipv6_tun) {
376                                 key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
377                                 key_size +=
378                                         sizeof(struct nfp_flower_ipv6_udp_tun);
379                         } else {
380                                 key_size +=
381                                         sizeof(struct nfp_flower_ipv4_udp_tun);
382                         }
383
384                         if (enc_op.key) {
385                                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
386                                 return -EOPNOTSUPP;
387                         }
388                 } else {
389                         flow_rule_match_enc_ports(rule, &enc_ports);
390                         if (enc_ports.mask->dst != cpu_to_be16(~0)) {
391                                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
392                                 return -EOPNOTSUPP;
393                         }
394
395                         err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
396                                                             enc_op.key,
397                                                             &key_layer_two,
398                                                             &key_layer,
399                                                             &key_size, priv,
400                                                             tun_type, ipv6_tun,
401                                                             extack);
402                         if (err)
403                                 return err;
404
405                         /* Ensure the ingress netdev matches the expected
406                          * tun type.
407                          */
408                         if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
409                                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
410                                 return -EOPNOTSUPP;
411                         }
412                 }
413         }
414
415         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
416                 flow_rule_match_basic(rule, &basic);
417
418         if (basic.mask && basic.mask->n_proto) {
419                 /* Ethernet type is present in the key. */
420                 switch (basic.key->n_proto) {
421                 case cpu_to_be16(ETH_P_IP):
422                         key_layer |= NFP_FLOWER_LAYER_IPV4;
423                         key_size += sizeof(struct nfp_flower_ipv4);
424                         break;
425
426                 case cpu_to_be16(ETH_P_IPV6):
427                         key_layer |= NFP_FLOWER_LAYER_IPV6;
428                         key_size += sizeof(struct nfp_flower_ipv6);
429                         break;
430
431                 /* Currently we do not offload ARP
432                  * because we rely on it to get to the host.
433                  */
434                 case cpu_to_be16(ETH_P_ARP):
435                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
436                         return -EOPNOTSUPP;
437
438                 case cpu_to_be16(ETH_P_MPLS_UC):
439                 case cpu_to_be16(ETH_P_MPLS_MC):
440                         if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
441                                 key_layer |= NFP_FLOWER_LAYER_MAC;
442                                 key_size += sizeof(struct nfp_flower_mac_mpls);
443                         }
444                         break;
445
446                 /* Will be included in layer 2. */
447                 case cpu_to_be16(ETH_P_8021Q):
448                         break;
449
450                 default:
451                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
452                         return -EOPNOTSUPP;
453                 }
454         } else if (nfp_flower_check_higher_than_mac(flow)) {
455                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
456                 return -EOPNOTSUPP;
457         }
458
459         if (basic.mask && basic.mask->ip_proto) {
460                 switch (basic.key->ip_proto) {
461                 case IPPROTO_TCP:
462                 case IPPROTO_UDP:
463                 case IPPROTO_SCTP:
464                 case IPPROTO_ICMP:
465                 case IPPROTO_ICMPV6:
466                         key_layer |= NFP_FLOWER_LAYER_TP;
467                         key_size += sizeof(struct nfp_flower_tp_ports);
468                         break;
469                 }
470         }
471
472         if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
473             nfp_flower_check_higher_than_l3(flow)) {
474                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
475                 return -EOPNOTSUPP;
476         }
477
478         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
479                 struct flow_match_tcp tcp;
480                 u32 tcp_flags;
481
482                 flow_rule_match_tcp(rule, &tcp);
483                 tcp_flags = be16_to_cpu(tcp.key->flags);
484
485                 if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
486                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
487                         return -EOPNOTSUPP;
488                 }
489
490                 /* We only support PSH and URG flags when either
491                  * FIN, SYN or RST is present as well.
492                  */
493                 if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
494                     !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
495                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
496                         return -EOPNOTSUPP;
497                 }
498
499         /* We need to store TCP flags in either the IPv4 or IPv6 key
500          * space, thus we need to ensure we include an IPv4/IPv6 key
501                  * layer if we have not done so already.
502                  */
503                 if (!basic.key) {
504                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
505                         return -EOPNOTSUPP;
506                 }
507
508                 if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
509                     !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
510                         switch (basic.key->n_proto) {
511                         case cpu_to_be16(ETH_P_IP):
512                                 key_layer |= NFP_FLOWER_LAYER_IPV4;
513                                 key_size += sizeof(struct nfp_flower_ipv4);
514                                 break;
515
516                         case cpu_to_be16(ETH_P_IPV6):
517                                 key_layer |= NFP_FLOWER_LAYER_IPV6;
518                                 key_size += sizeof(struct nfp_flower_ipv6);
519                                 break;
520
521                         default:
522                                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
523                                 return -EOPNOTSUPP;
524                         }
525                 }
526         }
527
528         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
529                 struct flow_match_control ctl;
530
531                 flow_rule_match_control(rule, &ctl);
532                 if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
533                         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
534                         return -EOPNOTSUPP;
535                 }
536         }
537
538         ret_key_ls->key_layer = key_layer;
539         ret_key_ls->key_layer_two = key_layer_two;
540         ret_key_ls->key_size = key_size;
541
542         return 0;
543 }
544
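/* Allocate a flow payload along with buffers for the unmasked key, the mask
 * and the action data.
 */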
545 static struct nfp_fl_payload *
546 nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
547 {
548         struct nfp_fl_payload *flow_pay;
549
550         flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
551         if (!flow_pay)
552                 return NULL;
553
554         flow_pay->meta.key_len = key_layer->key_size;
555         flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
556         if (!flow_pay->unmasked_data)
557                 goto err_free_flow;
558
559         flow_pay->meta.mask_len = key_layer->key_size;
560         flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
561         if (!flow_pay->mask_data)
562                 goto err_free_unmasked;
563
564         flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
565         if (!flow_pay->action_data)
566                 goto err_free_mask;
567
568         flow_pay->nfp_tun_ipv4_addr = 0;
569         flow_pay->nfp_tun_ipv6 = NULL;
570         flow_pay->meta.flags = 0;
571         INIT_LIST_HEAD(&flow_pay->linked_flows);
572         flow_pay->in_hw = false;
573         flow_pay->pre_tun_rule.dev = NULL;
574
575         return flow_pay;
576
577 err_free_mask:
578         kfree(flow_pay->mask_data);
579 err_free_unmasked:
580         kfree(flow_pay->unmasked_data);
581 err_free_flow:
582         kfree(flow_pay);
583         return NULL;
584 }
585
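/* Walk the actions of @flow and mark in @merge every header field that an
 * action rewrites, recording the last action id and the number of output
 * actions encountered.
 */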
586 static int
587 nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
588                                      struct nfp_flower_merge_check *merge,
589                                      u8 *last_act_id, int *act_out)
590 {
591         struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
592         struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
593         struct nfp_fl_set_ip4_addrs *ipv4_add;
594         struct nfp_fl_set_ipv6_addr *ipv6_add;
595         struct nfp_fl_push_vlan *push_vlan;
596         struct nfp_fl_pre_tunnel *pre_tun;
597         struct nfp_fl_set_tport *tport;
598         struct nfp_fl_set_eth *eth;
599         struct nfp_fl_act_head *a;
600         unsigned int act_off = 0;
601         bool ipv6_tun = false;
602         u8 act_id = 0;
603         u8 *ports;
604         int i;
605
606         while (act_off < flow->meta.act_len) {
607                 a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
608                 act_id = a->jump_id;
609
610                 switch (act_id) {
611                 case NFP_FL_ACTION_OPCODE_OUTPUT:
612                         if (act_out)
613                                 (*act_out)++;
614                         break;
615                 case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
616                         push_vlan = (struct nfp_fl_push_vlan *)a;
617                         if (push_vlan->vlan_tci)
618                                 merge->tci = cpu_to_be16(0xffff);
619                         break;
620                 case NFP_FL_ACTION_OPCODE_POP_VLAN:
621                         merge->tci = cpu_to_be16(0);
622                         break;
623                 case NFP_FL_ACTION_OPCODE_SET_TUNNEL:
624                         /* New tunnel header means l2 to l4 can be matched. */
625                         eth_broadcast_addr(&merge->l2.mac_dst[0]);
626                         eth_broadcast_addr(&merge->l2.mac_src[0]);
627                         memset(&merge->l4, 0xff,
628                                sizeof(struct nfp_flower_tp_ports));
629                         if (ipv6_tun)
630                                 memset(&merge->ipv6, 0xff,
631                                        sizeof(struct nfp_flower_ipv6));
632                         else
633                                 memset(&merge->ipv4, 0xff,
634                                        sizeof(struct nfp_flower_ipv4));
635                         break;
636                 case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
637                         eth = (struct nfp_fl_set_eth *)a;
638                         for (i = 0; i < ETH_ALEN; i++)
639                                 merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
640                         for (i = 0; i < ETH_ALEN; i++)
641                                 merge->l2.mac_src[i] |=
642                                         eth->eth_addr_mask[ETH_ALEN + i];
643                         break;
644                 case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
645                         ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
646                         merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
647                         merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
648                         break;
649                 case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
650                         ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
651                         merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
652                         merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
653                         break;
654                 case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
655                         ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
656                         for (i = 0; i < 4; i++)
657                                 merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
658                                         ipv6_add->ipv6[i].mask;
659                         break;
660                 case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
661                         ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
662                         for (i = 0; i < 4; i++)
663                                 merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
664                                         ipv6_add->ipv6[i].mask;
665                         break;
666                 case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
667                         ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
668                         merge->ipv6.ip_ext.ttl |=
669                                 ipv6_tc_hl_fl->ipv6_hop_limit_mask;
670                         merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
671                         merge->ipv6.ipv6_flow_label_exthdr |=
672                                 ipv6_tc_hl_fl->ipv6_label_mask;
673                         break;
674                 case NFP_FL_ACTION_OPCODE_SET_UDP:
675                 case NFP_FL_ACTION_OPCODE_SET_TCP:
676                         tport = (struct nfp_fl_set_tport *)a;
677                         ports = (u8 *)&merge->l4.port_src;
678                         for (i = 0; i < 4; i++)
679                                 ports[i] |= tport->tp_port_mask[i];
680                         break;
681                 case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
682                         pre_tun = (struct nfp_fl_pre_tunnel *)a;
683                         ipv6_tun = be16_to_cpu(pre_tun->flags) &
684                                         NFP_FL_PRE_TUN_IPV6;
685                         break;
686                 case NFP_FL_ACTION_OPCODE_PRE_LAG:
687                 case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
688                         break;
689                 default:
690                         return -EOPNOTSUPP;
691                 }
692
693                 act_off += a->len_lw << NFP_FL_LW_SIZ;
694         }
695
696         if (last_act_id)
697                 *last_act_id = act_id;
698
699         return 0;
700 }
701
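/* Copy the mask data of @flow into @merge so the fields it matches on can
 * be compared against another flow.
 */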
702 static int
703 nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
704                                 struct nfp_flower_merge_check *merge,
705                                 bool extra_fields)
706 {
707         struct nfp_flower_meta_tci *meta_tci;
708         u8 *mask = flow->mask_data;
709         u8 key_layer, match_size;
710
711         memset(merge, 0, sizeof(struct nfp_flower_merge_check));
712
713         meta_tci = (struct nfp_flower_meta_tci *)mask;
714         key_layer = meta_tci->nfp_flow_key_layer;
715
716         if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
717                 return -EOPNOTSUPP;
718
719         merge->tci = meta_tci->tci;
720         mask += sizeof(struct nfp_flower_meta_tci);
721
722         if (key_layer & NFP_FLOWER_LAYER_EXT_META)
723                 mask += sizeof(struct nfp_flower_ext_meta);
724
725         mask += sizeof(struct nfp_flower_in_port);
726
727         if (key_layer & NFP_FLOWER_LAYER_MAC) {
728                 match_size = sizeof(struct nfp_flower_mac_mpls);
729                 memcpy(&merge->l2, mask, match_size);
730                 mask += match_size;
731         }
732
733         if (key_layer & NFP_FLOWER_LAYER_TP) {
734                 match_size = sizeof(struct nfp_flower_tp_ports);
735                 memcpy(&merge->l4, mask, match_size);
736                 mask += match_size;
737         }
738
739         if (key_layer & NFP_FLOWER_LAYER_IPV4) {
740                 match_size = sizeof(struct nfp_flower_ipv4);
741                 memcpy(&merge->ipv4, mask, match_size);
742         }
743
744         if (key_layer & NFP_FLOWER_LAYER_IPV6) {
745                 match_size = sizeof(struct nfp_flower_ipv6);
746                 memcpy(&merge->ipv6, mask, match_size);
747         }
748
749         return 0;
750 }
751
752 static int
753 nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
754                      struct nfp_fl_payload *sub_flow2)
755 {
756         /* Two flows can be merged if sub_flow2 only matches on bits that are
757          * either matched by sub_flow1 or set by a sub_flow1 action. This
758          * ensures that every packet that hits sub_flow1 and recirculates is
759          * guaranteed to hit sub_flow2.
760          */
761         struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
762         int err, act_out = 0;
763         u8 last_act_id = 0;
764
765         err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
766                                               true);
767         if (err)
768                 return err;
769
770         err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
771                                               false);
772         if (err)
773                 return err;
774
775         err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
776                                                    &last_act_id, &act_out);
777         if (err)
778                 return err;
779
780         /* Must only be 1 output action and it must be the last in sequence. */
781         if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
782                 return -EOPNOTSUPP;
783
784         /* Reject merge if sub_flow2 matches on something that is not matched
785          * on or set in an action by sub_flow1.
786          */
787         err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
788                             sub_flow1_merge.vals,
789                             sizeof(struct nfp_flower_merge_check) * 8);
790         if (err)
791                 return -EINVAL;
792
793         return 0;
794 }
795
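/* Copy any leading pre-tunnel/pre-LAG actions from @act_src to @act_dst and
 * return the number of bytes consumed by them.
 */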
796 static unsigned int
797 nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
798                             bool *tunnel_act)
799 {
800         unsigned int act_off = 0, act_len;
801         struct nfp_fl_act_head *a;
802         u8 act_id = 0;
803
804         while (act_off < len) {
805                 a = (struct nfp_fl_act_head *)&act_src[act_off];
806                 act_len = a->len_lw << NFP_FL_LW_SIZ;
807                 act_id = a->jump_id;
808
809                 switch (act_id) {
810                 case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
811                         if (tunnel_act)
812                                 *tunnel_act = true;
813                         fallthrough;
814                 case NFP_FL_ACTION_OPCODE_PRE_LAG:
815                         memcpy(act_dst + act_off, act_src + act_off, act_len);
816                         break;
817                 default:
818                         return act_off;
819                 }
820
821                 act_off += act_len;
822         }
823
824         return act_off;
825 }
826
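/* Verify that the actions following a tunnel push consist of at most one
 * leading VLAN push followed only by output actions.
 */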
827 static int
828 nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
829 {
830         struct nfp_fl_act_head *a;
831         unsigned int act_off = 0;
832
833         while (act_off < len) {
834                 a = (struct nfp_fl_act_head *)&acts[act_off];
835
836                 if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off)
837                         *vlan = (struct nfp_fl_push_vlan *)a;
838                 else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
839                         return -EOPNOTSUPP;
840
841                 act_off += a->len_lw << NFP_FL_LW_SIZ;
842         }
843
844         /* Ensure any VLAN push also has an egress action. */
845         if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan))
846                 return -EOPNOTSUPP;
847
848         return 0;
849 }
850
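/* Fold the VLAN push into the tunnel set action so the firmware adds the
 * VLAN tag to the outer header when pushing the tunnel.
 */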
851 static int
852 nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
853 {
854         struct nfp_fl_set_tun *tun;
855         struct nfp_fl_act_head *a;
856         unsigned int act_off = 0;
857
858         while (act_off < len) {
859                 a = (struct nfp_fl_act_head *)&acts[act_off];
860
861                 if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_TUNNEL) {
862                         tun = (struct nfp_fl_set_tun *)a;
863                         tun->outer_vlan_tpid = vlan->vlan_tpid;
864                         tun->outer_vlan_tci = vlan->vlan_tci;
865
866                         return 0;
867                 }
868
869                 act_off += a->len_lw << NFP_FL_LW_SIZ;
870         }
871
872         /* Return error if no tunnel action is found. */
873         return -EOPNOTSUPP;
874 }
875
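/* Build the merge flow action list: pre-actions from both sub_flows first,
 * then sub_flow1's actions minus its output, then sub_flow2's actions.
 */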
876 static int
877 nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
878                         struct nfp_fl_payload *sub_flow2,
879                         struct nfp_fl_payload *merge_flow)
880 {
881         unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
882         struct nfp_fl_push_vlan *post_tun_push_vlan = NULL;
883         bool tunnel_act = false;
884         char *merge_act;
885         int err;
886
887         /* The last action of sub_flow1 must be output - do not merge this. */
888         sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
889         sub2_act_len = sub_flow2->meta.act_len;
890
891         if (!sub2_act_len)
892                 return -EINVAL;
893
894         if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
895                 return -EINVAL;
896
897         /* A shortcut can only be applied if there is a single action. */
898         if (sub1_act_len)
899                 merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
900         else
901                 merge_flow->meta.shortcut = sub_flow2->meta.shortcut;
902
903         merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
904         merge_act = merge_flow->action_data;
905
906         /* Copy any pre-actions to the start of the merge flow action list. */
907         pre_off1 = nfp_flower_copy_pre_actions(merge_act,
908                                                sub_flow1->action_data,
909                                                sub1_act_len, &tunnel_act);
910         merge_act += pre_off1;
911         sub1_act_len -= pre_off1;
912         pre_off2 = nfp_flower_copy_pre_actions(merge_act,
913                                                sub_flow2->action_data,
914                                                sub2_act_len, NULL);
915         merge_act += pre_off2;
916         sub2_act_len -= pre_off2;
917
918         /* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
919          * a tunnel, there are restrictions on what sub_flow 2 actions lead to a
920          * valid merge.
921          */
922         if (tunnel_act) {
923                 char *post_tun_acts = &sub_flow2->action_data[pre_off2];
924
925                 err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len,
926                                                   &post_tun_push_vlan);
927                 if (err)
928                         return err;
929
930                 if (post_tun_push_vlan) {
931                         pre_off2 += sizeof(*post_tun_push_vlan);
932                         sub2_act_len -= sizeof(*post_tun_push_vlan);
933                 }
934         }
935
936         /* Copy remaining actions from sub_flows 1 and 2. */
937         memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
938
939         if (post_tun_push_vlan) {
940                 /* Update tunnel action in merge to include VLAN push. */
941                 err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len,
942                                                  post_tun_push_vlan);
943                 if (err)
944                         return err;
945
946                 merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan);
947         }
948
949         merge_act += sub1_act_len;
950         memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);
951
952         return 0;
953 }
954
955 /* Flow link code should only be accessed under RTNL. */
956 static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
957 {
958         list_del(&link->merge_flow.list);
959         list_del(&link->sub_flow.list);
960         kfree(link);
961 }
962
963 static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
964                                     struct nfp_fl_payload *sub_flow)
965 {
966         struct nfp_fl_payload_link *link;
967
968         list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
969                 if (link->sub_flow.flow == sub_flow) {
970                         nfp_flower_unlink_flow(link);
971                         return;
972                 }
973 }
974
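/* Record the link between a merge flow and one of its sub_flows so the
 * merge can be torn down when either sub_flow is removed.
 */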
975 static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
976                                  struct nfp_fl_payload *sub_flow)
977 {
978         struct nfp_fl_payload_link *link;
979
980         link = kmalloc(sizeof(*link), GFP_KERNEL);
981         if (!link)
982                 return -ENOMEM;
983
984         link->merge_flow.flow = merge_flow;
985         list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
986         link->sub_flow.flow = sub_flow;
987         list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);
988
989         return 0;
990 }
991
992 /**
993  * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows into a single flow.
994  * @app:        Pointer to the APP handle
995  * @sub_flow1:  Initial flow matched to produce merge hint
996  * @sub_flow2:  Post recirculation flow matched in merge hint
997  *
998  * Combines 2 flows (if valid) into a single flow, removing the initial flow
999  * from hw and offloading the new, merged flow.
1000  *
1001  * Return: negative value on error, 0 on success.
1002  */
1003 int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
1004                                      struct nfp_fl_payload *sub_flow1,
1005                                      struct nfp_fl_payload *sub_flow2)
1006 {
1007         struct flow_cls_offload merge_tc_off;
1008         struct nfp_flower_priv *priv = app->priv;
1009         struct netlink_ext_ack *extack = NULL;
1010         struct nfp_fl_payload *merge_flow;
1011         struct nfp_fl_key_ls merge_key_ls;
1012         int err;
1013
1014         ASSERT_RTNL();
1015
1016         extack = merge_tc_off.common.extack;
1017         if (sub_flow1 == sub_flow2 ||
1018             nfp_flower_is_merge_flow(sub_flow1) ||
1019             nfp_flower_is_merge_flow(sub_flow2))
1020                 return -EINVAL;
1021
1022         err = nfp_flower_can_merge(sub_flow1, sub_flow2);
1023         if (err)
1024                 return err;
1025
1026         merge_key_ls.key_size = sub_flow1->meta.key_len;
1027
1028         merge_flow = nfp_flower_allocate_new(&merge_key_ls);
1029         if (!merge_flow)
1030                 return -ENOMEM;
1031
1032         merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
1033         merge_flow->ingress_dev = sub_flow1->ingress_dev;
1034
1035         memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
1036                sub_flow1->meta.key_len);
1037         memcpy(merge_flow->mask_data, sub_flow1->mask_data,
1038                sub_flow1->meta.mask_len);
1039
1040         err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
1041         if (err)
1042                 goto err_destroy_merge_flow;
1043
1044         err = nfp_flower_link_flows(merge_flow, sub_flow1);
1045         if (err)
1046                 goto err_destroy_merge_flow;
1047
1048         err = nfp_flower_link_flows(merge_flow, sub_flow2);
1049         if (err)
1050                 goto err_unlink_sub_flow1;
1051
1052         merge_tc_off.cookie = merge_flow->tc_flower_cookie;
1053         err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
1054                                         merge_flow->ingress_dev, extack);
1055         if (err)
1056                 goto err_unlink_sub_flow2;
1057
1058         err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
1059                                      nfp_flower_table_params);
1060         if (err)
1061                 goto err_release_metadata;
1062
1063         err = nfp_flower_xmit_flow(app, merge_flow,
1064                                    NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
1065         if (err)
1066                 goto err_remove_rhash;
1067
1068         merge_flow->in_hw = true;
1069         sub_flow1->in_hw = false;
1070
1071         return 0;
1072
1073 err_remove_rhash:
1074         WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
1075                                             &merge_flow->fl_node,
1076                                             nfp_flower_table_params));
1077 err_release_metadata:
1078         nfp_modify_flow_metadata(app, merge_flow);
1079 err_unlink_sub_flow2:
1080         nfp_flower_unlink_flows(merge_flow, sub_flow2);
1081 err_unlink_sub_flow1:
1082         nfp_flower_unlink_flows(merge_flow, sub_flow1);
1083 err_destroy_merge_flow:
1084         kfree(merge_flow->action_data);
1085         kfree(merge_flow->mask_data);
1086         kfree(merge_flow->unmasked_data);
1087         kfree(merge_flow);
1088         return err;
1089 }
1090
1091 /**
1092  * nfp_flower_validate_pre_tun_rule() - Verify a flow as a valid pre-tunnel rule
1093  * @app:        Pointer to the APP handle
1094  * @flow:       Pointer to NFP flow representation of rule
1095  * @key_ls:     Pointer to NFP key layers structure
1096  * @extack:     Netlink extended ACK report
1097  *
1098  * Verifies the flow as a pre-tunnel rule.
1099  *
1100  * Return: negative value on error, 0 if verified.
1101  */
1102 static int
1103 nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
1104                                  struct nfp_fl_payload *flow,
1105                                  struct nfp_fl_key_ls *key_ls,
1106                                  struct netlink_ext_ack *extack)
1107 {
1108         struct nfp_flower_priv *priv = app->priv;
1109         struct nfp_flower_meta_tci *meta_tci;
1110         struct nfp_flower_mac_mpls *mac;
1111         u8 *ext = flow->unmasked_data;
1112         struct nfp_fl_act_head *act;
1113         u8 *mask = flow->mask_data;
1114         bool vlan = false;
1115         int act_offset;
1116         u8 key_layer;
1117
1118         meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;
1119         key_layer = key_ls->key_layer;
1120         if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
1121                 if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
1122                         u16 vlan_tci = be16_to_cpu(meta_tci->tci);
1123
1124                         vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
1125                         flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
1126                         vlan = true;
1127                 } else {
1128                         flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
1129                 }
1130         }
1131
1132         if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
1133                 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
1134                 return -EOPNOTSUPP;
1135         } else if (key_ls->key_layer_two & ~NFP_FLOWER_LAYER2_QINQ) {
1136                 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non-vlan in extended match fields");
1137                 return -EOPNOTSUPP;
1138         }
1139
1140         if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
1141                 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required");
1142                 return -EOPNOTSUPP;
1143         }
1144
1145         if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
1146             !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
1147                 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present");
1148                 return -EOPNOTSUPP;
1149         }
1150
1151         /* Skip fields known to exist. */
1152         mask += sizeof(struct nfp_flower_meta_tci);
1153         ext += sizeof(struct nfp_flower_meta_tci);
1154         if (key_ls->key_layer_two) {
1155                 mask += sizeof(struct nfp_flower_ext_meta);
1156                 ext += sizeof(struct nfp_flower_ext_meta);
1157         }
1158         mask += sizeof(struct nfp_flower_in_port);
1159         ext += sizeof(struct nfp_flower_in_port);
1160
1161         /* Ensure destination MAC address matches pre_tun_dev. */
1162         mac = (struct nfp_flower_mac_mpls *)ext;
1163         if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, ETH_ALEN)) {
1164                 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC must match output dev MAC");
1165                 return -EOPNOTSUPP;
1166         }
1167
1168         /* Ensure destination MAC address is fully matched. */
1169         mac = (struct nfp_flower_mac_mpls *)mask;
1170         if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
1171                 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked");
1172                 return -EOPNOTSUPP;
1173         }
1174
1175         if (mac->mpls_lse) {
1176                 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported");
1177                 return -EOPNOTSUPP;
1178         }
1179
1180         mask += sizeof(struct nfp_flower_mac_mpls);
1181         ext += sizeof(struct nfp_flower_mac_mpls);
1182         if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
1183             key_layer & NFP_FLOWER_LAYER_IPV6) {
1184                 /* Flags and proto fields have same offset in IPv4 and IPv6. */
1185                 int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
1186                 int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
1187                 int size;
1188                 int i;
1189
1190                 size = key_layer & NFP_FLOWER_LAYER_IPV4 ?
1191                         sizeof(struct nfp_flower_ipv4) :
1192                         sizeof(struct nfp_flower_ipv6);
1193
1195                 /* Ensure proto and flags are the only IP layer fields. */
1196                 for (i = 0; i < size; i++)
1197                         if (mask[i] && i != ip_flags && i != ip_proto) {
1198                                 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
1199                                 return -EOPNOTSUPP;
1200                         }
1201                 ext += size;
1202                 mask += size;
1203         }
1204
1205         if ((priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
1206                 if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
1207                         struct nfp_flower_vlan *vlan_tags;
1208                         u16 vlan_tci;
1209
1210                         vlan_tags = (struct nfp_flower_vlan *)ext;
1211
1212                         vlan_tci = be16_to_cpu(vlan_tags->outer_tci);
1213
1214                         vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
1215                         flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
1216                         vlan = true;
1217                 } else {
1218                         flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
1219                 }
1220         }
1221
1222         /* Action must be a single egress or pop_vlan and egress. */
1223         act_offset = 0;
1224         act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
1225         if (vlan) {
1226                 if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) {
1227                         NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action");
1228                         return -EOPNOTSUPP;
1229                 }
1230
1231                 act_offset += act->len_lw << NFP_FL_LW_SIZ;
1232                 act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
1233         }
1234
1235         if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) {
1236                 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected");
1237                 return -EOPNOTSUPP;
1238         }
1239
1240         act_offset += act->len_lw << NFP_FL_LW_SIZ;
1241
1242         /* Ensure there are no more actions after egress. */
1243         if (act_offset != flow->meta.act_len) {
1244                 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action");
1245                 return -EOPNOTSUPP;
1246         }
1247
1248         return 0;
1249 }
1250
1251 /**
1252  * nfp_flower_add_offload() - Adds a new flow to hardware.
1253  * @app:        Pointer to the APP handle
1254  * @netdev:     netdev structure.
1255  * @flow:       TC flower classifier offload structure.
1256  *
1257  * Adds a new flow to the repeated hash structure and action payload.
1258  *
1259  * Return: negative value on error, 0 if configured successfully.
1260  */
1261 static int
1262 nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
1263                        struct flow_cls_offload *flow)
1264 {
1265         enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
1266         struct nfp_flower_priv *priv = app->priv;
1267         struct netlink_ext_ack *extack = NULL;
1268         struct nfp_fl_payload *flow_pay;
1269         struct nfp_fl_key_ls *key_layer;
1270         struct nfp_port *port = NULL;
1271         int err;
1272
1273         extack = flow->common.extack;
1274         if (nfp_netdev_is_nfp_repr(netdev))
1275                 port = nfp_port_from_netdev(netdev);
1276
1277         key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
1278         if (!key_layer)
1279                 return -ENOMEM;
1280
1281         err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
1282                                               &tun_type, extack);
1283         if (err)
1284                 goto err_free_key_ls;
1285
1286         flow_pay = nfp_flower_allocate_new(key_layer);
1287         if (!flow_pay) {
1288                 err = -ENOMEM;
1289                 goto err_free_key_ls;
1290         }
1291
1292         err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
1293                                             flow_pay, tun_type, extack);
1294         if (err)
1295                 goto err_destroy_flow;
1296
1297         err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack);
1298         if (err)
1299                 goto err_destroy_flow;
1300
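             /* Flows marked as pre-tunnel rules need additional validation
              * before they can be offloaded.
              */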
1301         if (flow_pay->pre_tun_rule.dev) {
1302                 err = nfp_flower_validate_pre_tun_rule(app, flow_pay, key_layer, extack);
1303                 if (err)
1304                         goto err_destroy_flow;
1305         }
1306
1307         err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
1308         if (err)
1309                 goto err_destroy_flow;
1310
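             /* The TC cookie is the lookup key used to find this flow again
              * on later delete and stats requests.
              */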
1311         flow_pay->tc_flower_cookie = flow->cookie;
1312         err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
1313                                      nfp_flower_table_params);
1314         if (err) {
1315                 NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
1316                 goto err_release_metadata;
1317         }
1318
1319         if (flow_pay->pre_tun_rule.dev)
1320                 err = nfp_flower_xmit_pre_tun_flow(app, flow_pay);
1321         else
1322                 err = nfp_flower_xmit_flow(app, flow_pay,
1323                                            NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
1324         if (err)
1325                 goto err_remove_rhash;
1326
1327         if (port)
1328                 port->tc_offload_cnt++;
1329
1330         flow_pay->in_hw = true;
1331
1332         /* The flow payload is deallocated when the flower rule is destroyed. */
1333         kfree(key_layer);
1334
1335         return 0;
1336
1337 err_remove_rhash:
1338         WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
1339                                             &flow_pay->fl_node,
1340                                             nfp_flower_table_params));
1341 err_release_metadata:
1342         nfp_modify_flow_metadata(app, flow_pay);
1343 err_destroy_flow:
1344         if (flow_pay->nfp_tun_ipv6)
1345                 nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);
1346         kfree(flow_pay->action_data);
1347         kfree(flow_pay->mask_data);
1348         kfree(flow_pay->unmasked_data);
1349         kfree(flow_pay);
1350 err_free_key_ls:
1351         kfree(key_layer);
1352         return err;
1353 }
1354
1355 static void
1356 nfp_flower_remove_merge_flow(struct nfp_app *app,
1357                              struct nfp_fl_payload *del_sub_flow,
1358                              struct nfp_fl_payload *merge_flow)
1359 {
1360         struct nfp_flower_priv *priv = app->priv;
1361         struct nfp_fl_payload_link *link, *temp;
1362         struct nfp_fl_payload *origin;
1363         bool mod = false;
1364         int err;
1365
1366         link = list_first_entry(&merge_flow->linked_flows,
1367                                 struct nfp_fl_payload_link, merge_flow.list);
1368         origin = link->sub_flow.flow;
1369
1370         /* Re-add the rule the merge had overwritten if it has not been deleted. */
1371         if (origin != del_sub_flow)
1372                 mod = true;
1373
1374         err = nfp_modify_flow_metadata(app, merge_flow);
1375         if (err) {
1376                 nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
1377                 goto err_free_links;
1378         }
1379
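             /* If the overwritten sub_flow still exists, restore it in
              * firmware with a MOD message, otherwise simply delete the
              * merge flow.
              */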
1380         if (!mod) {
1381                 err = nfp_flower_xmit_flow(app, merge_flow,
1382                                            NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
1383                 if (err) {
1384                         nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
1385                         goto err_free_links;
1386                 }
1387         } else {
1388                 __nfp_modify_flow_metadata(priv, origin);
1389                 err = nfp_flower_xmit_flow(app, origin,
1390                                            NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
1391                 if (err)
1392                         nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
1393                 origin->in_hw = true;
1394         }
1395
1396 err_free_links:
1397         /* Clean up any links connected with the merge flow. */
1398         list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
1399                                  merge_flow.list)
1400                 nfp_flower_unlink_flow(link);
1401
1402         kfree(merge_flow->action_data);
1403         kfree(merge_flow->mask_data);
1404         kfree(merge_flow->unmasked_data);
1405         WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
1406                                             &merge_flow->fl_node,
1407                                             nfp_flower_table_params));
1408         kfree_rcu(merge_flow, rcu);
1409 }
1410
1411 static void
1412 nfp_flower_del_linked_merge_flows(struct nfp_app *app,
1413                                   struct nfp_fl_payload *sub_flow)
1414 {
1415         struct nfp_fl_payload_link *link, *temp;
1416
1417         /* Remove any merge flow formed from the deleted sub_flow. */
1418         list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
1419                                  sub_flow.list)
1420                 nfp_flower_remove_merge_flow(app, sub_flow,
1421                                              link->merge_flow.flow);
1422 }
1423
1424 /**
1425  * nfp_flower_del_offload() - Removes a flow from hardware.
1426  * @app:        Pointer to the APP handle
1427  * @netdev:     netdev structure.
1428  * @flow:       TC flower classifier offload structure
1429  *
1430  * Removes a flow from the repeated hash structure and clears the
1431  * action payload. Any flows merged from this are also deleted.
1432  *
1433  * Return: negative value on error, 0 if removed successfully.
1434  */
1435 static int
1436 nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
1437                        struct flow_cls_offload *flow)
1438 {
1439         struct nfp_flower_priv *priv = app->priv;
1440         struct netlink_ext_ack *extack = NULL;
1441         struct nfp_fl_payload *nfp_flow;
1442         struct nfp_port *port = NULL;
1443         int err;
1444
1445         extack = flow->common.extack;
1446         if (nfp_netdev_is_nfp_repr(netdev))
1447                 port = nfp_port_from_netdev(netdev);
1448
1449         nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
1450         if (!nfp_flow) {
1451                 NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
1452                 return -ENOENT;
1453         }
1454
1455         err = nfp_modify_flow_metadata(app, nfp_flow);
1456         if (err)
1457                 goto err_free_merge_flow;
1458
1459         if (nfp_flow->nfp_tun_ipv4_addr)
1460                 nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
1461
1462         if (nfp_flow->nfp_tun_ipv6)
1463                 nfp_tunnel_put_ipv6_off(app, nfp_flow->nfp_tun_ipv6);
1464
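             /* Flows that were never programmed to firmware only need their
              * host state cleaned up.
              */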
1465         if (!nfp_flow->in_hw) {
1466                 err = 0;
1467                 goto err_free_merge_flow;
1468         }
1469
1470         if (nfp_flow->pre_tun_rule.dev)
1471                 err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow);
1472         else
1473                 err = nfp_flower_xmit_flow(app, nfp_flow,
1474                                            NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
1475         /* Fall through on error. */
1476
1477 err_free_merge_flow:
1478         nfp_flower_del_linked_merge_flows(app, nfp_flow);
1479         if (port)
1480                 port->tc_offload_cnt--;
1481         kfree(nfp_flow->action_data);
1482         kfree(nfp_flow->mask_data);
1483         kfree(nfp_flow->unmasked_data);
1484         WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
1485                                             &nfp_flow->fl_node,
1486                                             nfp_flower_table_params));
1487         kfree_rcu(nfp_flow, rcu);
1488         return err;
1489 }
1490
1491 static void
1492 __nfp_flower_update_merge_stats(struct nfp_app *app,
1493                                 struct nfp_fl_payload *merge_flow)
1494 {
1495         struct nfp_flower_priv *priv = app->priv;
1496         struct nfp_fl_payload_link *link;
1497         struct nfp_fl_payload *sub_flow;
1498         u64 pkts, bytes, used;
1499         u32 ctx_id;
1500
1501         ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
1502         pkts = priv->stats[ctx_id].pkts;
1503         /* Do not cycle subflows if no stats to distribute. */
1504         if (!pkts)
1505                 return;
1506         bytes = priv->stats[ctx_id].bytes;
1507         used = priv->stats[ctx_id].used;
1508
1509         /* Reset stats for the merge flow. */
1510         priv->stats[ctx_id].pkts = 0;
1511         priv->stats[ctx_id].bytes = 0;
1512
1513         /* The merge flow has received stats updates from firmware.
1514          * Distribute these stats to all subflows that form the merge.
1515          * The stats will be collected from TC via the subflows.
1516          */
1517         list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
1518                 sub_flow = link->sub_flow.flow;
1519                 ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
1520                 priv->stats[ctx_id].pkts += pkts;
1521                 priv->stats[ctx_id].bytes += bytes;
1522                 priv->stats[ctx_id].used = max_t(u64, used,
1523                                                  priv->stats[ctx_id].used);
1524         }
1525 }
1526
1527 static void
1528 nfp_flower_update_merge_stats(struct nfp_app *app,
1529                               struct nfp_fl_payload *sub_flow)
1530 {
1531         struct nfp_fl_payload_link *link;
1532
1533         /* Distribute stats from any merge flows that this subflow is part of. */
1534         list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
1535                 __nfp_flower_update_merge_stats(app, link->merge_flow.flow);
1536 }
1537
1538 /**
1539  * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
1540  * @app:        Pointer to the APP handle
1541  * @netdev:     Netdev structure.
1542  * @flow:       TC flower classifier offload structure
1543  *
1544  * Populates a flow statistics structure which corresponds to a
1545  * specific flow.
1546  *
1547  * Return: negative value on error, 0 if stats populated successfully.
1548  */
1549 static int
1550 nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
1551                      struct flow_cls_offload *flow)
1552 {
1553         struct nfp_flower_priv *priv = app->priv;
1554         struct netlink_ext_ack *extack = NULL;
1555         struct nfp_fl_payload *nfp_flow;
1556         u32 ctx_id;
1557
1558         extack = flow->common.extack;
1559         nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
1560         if (!nfp_flow) {
1561                 NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
1562                 return -EINVAL;
1563         }
1564
1565         ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
1566
1567         spin_lock_bh(&priv->stats_lock);
1568         /* If request is for a sub_flow, update stats from merged flows. */
1569         if (!list_empty(&nfp_flow->linked_flows))
1570                 nfp_flower_update_merge_stats(app, nfp_flow);
1571
1572         flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
1573                           priv->stats[ctx_id].pkts, 0, priv->stats[ctx_id].used,
1574                           FLOW_ACTION_HW_STATS_DELAYED);
1575
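             /* Stats are reported as deltas, so clear the counters once they
              * have been passed to TC.
              */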
1576         priv->stats[ctx_id].pkts = 0;
1577         priv->stats[ctx_id].bytes = 0;
1578         spin_unlock_bh(&priv->stats_lock);
1579
1580         return 0;
1581 }
1582
1583 static int
1584 nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
1585                         struct flow_cls_offload *flower)
1586 {
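             /* Only Ethernet based protocols are supported for offload. */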
1587         if (!eth_proto_is_802_3(flower->common.protocol))
1588                 return -EOPNOTSUPP;
1589
1590         switch (flower->command) {
1591         case FLOW_CLS_REPLACE:
1592                 return nfp_flower_add_offload(app, netdev, flower);
1593         case FLOW_CLS_DESTROY:
1594                 return nfp_flower_del_offload(app, netdev, flower);
1595         case FLOW_CLS_STATS:
1596                 return nfp_flower_get_stats(app, netdev, flower);
1597         default:
1598                 return -EOPNOTSUPP;
1599         }
1600 }
1601
1602 static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
1603                                         void *type_data, void *cb_priv)
1604 {
1605         struct nfp_repr *repr = cb_priv;
1606
1607         if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
1608                 return -EOPNOTSUPP;
1609
1610         switch (type) {
1611         case TC_SETUP_CLSFLOWER:
1612                 return nfp_flower_repr_offload(repr->app, repr->netdev,
1613                                                type_data);
1614         case TC_SETUP_CLSMATCHALL:
1615                 return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
1616                                                     type_data);
1617         default:
1618                 return -EOPNOTSUPP;
1619         }
1620 }
1621
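     /* List of all flow block callbacks allocated by this driver, used to
      * check whether a callback is already bound.
      */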
1622 static LIST_HEAD(nfp_block_cb_list);
1623
1624 static int nfp_flower_setup_tc_block(struct net_device *netdev,
1625                                      struct flow_block_offload *f)
1626 {
1627         struct nfp_repr *repr = netdev_priv(netdev);
1628         struct nfp_flower_repr_priv *repr_priv;
1629         struct flow_block_cb *block_cb;
1630
1631         if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1632                 return -EOPNOTSUPP;
1633
1634         repr_priv = repr->app_priv;
1635         repr_priv->block_shared = f->block_shared;
1636         f->driver_block_list = &nfp_block_cb_list;
1637
1638         switch (f->command) {
1639         case FLOW_BLOCK_BIND:
1640                 if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
1641                                           &nfp_block_cb_list))
1642                         return -EBUSY;
1643
1644                 block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
1645                                                repr, repr, NULL);
1646                 if (IS_ERR(block_cb))
1647                         return PTR_ERR(block_cb);
1648
1649                 flow_block_cb_add(block_cb, f);
1650                 list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
1651                 return 0;
1652         case FLOW_BLOCK_UNBIND:
1653                 block_cb = flow_block_cb_lookup(f->block,
1654                                                 nfp_flower_setup_tc_block_cb,
1655                                                 repr);
1656                 if (!block_cb)
1657                         return -ENOENT;
1658
1659                 flow_block_cb_remove(block_cb, f);
1660                 list_del(&block_cb->driver_list);
1661                 return 0;
1662         default:
1663                 return -EOPNOTSUPP;
1664         }
1665 }
1666
1667 int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
1668                         enum tc_setup_type type, void *type_data)
1669 {
1670         switch (type) {
1671         case TC_SETUP_BLOCK:
1672                 return nfp_flower_setup_tc_block(netdev, type_data);
1673         default:
1674                 return -EOPNOTSUPP;
1675         }
1676 }
1677
1678 struct nfp_flower_indr_block_cb_priv {
1679         struct net_device *netdev;
1680         struct nfp_app *app;
1681         struct list_head list;
1682 };
1683
1684 static struct nfp_flower_indr_block_cb_priv *
1685 nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
1686                                      struct net_device *netdev)
1687 {
1688         struct nfp_flower_indr_block_cb_priv *cb_priv;
1689         struct nfp_flower_priv *priv = app->priv;
1690
1691         /* All callback list access should be protected by RTNL. */
1692         ASSERT_RTNL();
1693
1694         list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
1695                 if (cb_priv->netdev == netdev)
1696                         return cb_priv;
1697
1698         return NULL;
1699 }
1700
1701 static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
1702                                           void *type_data, void *cb_priv)
1703 {
1704         struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
1705         struct flow_cls_offload *flower = type_data;
1706
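             /* Only rules on the default chain (chain 0) are offloaded here. */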
1707         if (flower->common.chain_index)
1708                 return -EOPNOTSUPP;
1709
1710         switch (type) {
1711         case TC_SETUP_CLSFLOWER:
1712                 return nfp_flower_repr_offload(priv->app, priv->netdev,
1713                                                type_data);
1714         default:
1715                 return -EOPNOTSUPP;
1716         }
1717 }
1718
1719 void nfp_flower_setup_indr_tc_release(void *cb_priv)
1720 {
1721         struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
1722
1723         list_del(&priv->list);
1724         kfree(priv);
1725 }
1726
1727 static int
1728 nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, struct nfp_app *app,
1729                                struct flow_block_offload *f, void *data,
1730                                void (*cleanup)(struct flow_block_cb *block_cb))
1731 {
1732         struct nfp_flower_indr_block_cb_priv *cb_priv;
1733         struct nfp_flower_priv *priv = app->priv;
1734         struct flow_block_cb *block_cb;
1735
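             /* Accept ingress blocks only on netdevs that cannot be offloaded
              * as internal ports, and egress blocks only on those that can.
              */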
1736         if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1737              !nfp_flower_internal_port_can_offload(app, netdev)) ||
1738             (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
1739              nfp_flower_internal_port_can_offload(app, netdev)))
1740                 return -EOPNOTSUPP;
1741
1742         switch (f->command) {
1743         case FLOW_BLOCK_BIND:
1744                 cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
1745                 if (cb_priv &&
1746                     flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
1747                                           cb_priv,
1748                                           &nfp_block_cb_list))
1749                         return -EBUSY;
1750
1751                 cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
1752                 if (!cb_priv)
1753                         return -ENOMEM;
1754
1755                 cb_priv->netdev = netdev;
1756                 cb_priv->app = app;
1757                 list_add(&cb_priv->list, &priv->indr_block_cb_priv);
1758
1759                 block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb,
1760                                                     cb_priv, cb_priv,
1761                                                     nfp_flower_setup_indr_tc_release,
1762                                                     f, netdev, sch, data, app, cleanup);
1763                 if (IS_ERR(block_cb)) {
1764                         list_del(&cb_priv->list);
1765                         kfree(cb_priv);
1766                         return PTR_ERR(block_cb);
1767                 }
1768
1769                 flow_block_cb_add(block_cb, f);
1770                 list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
1771                 return 0;
1772         case FLOW_BLOCK_UNBIND:
1773                 cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
1774                 if (!cb_priv)
1775                         return -ENOENT;
1776
1777                 block_cb = flow_block_cb_lookup(f->block,
1778                                                 nfp_flower_setup_indr_block_cb,
1779                                                 cb_priv);
1780                 if (!block_cb)
1781                         return -ENOENT;
1782
1783                 flow_indr_block_cb_remove(block_cb, f);
1784                 list_del(&block_cb->driver_list);
1785                 return 0;
1786         default:
1787                 return -EOPNOTSUPP;
1788         }
1789         return 0;
1790 }
1791
1792 int
1793 nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
1794                             enum tc_setup_type type, void *type_data,
1795                             void *data,
1796                             void (*cleanup)(struct flow_block_cb *block_cb))
1797 {
1798         if (!nfp_fl_is_netdev_to_offload(netdev))
1799                 return -EOPNOTSUPP;
1800
1801         switch (type) {
1802         case TC_SETUP_BLOCK:
1803                 return nfp_flower_setup_indr_tc_block(netdev, sch, cb_priv,
1804                                                       type_data, data, cleanup);
1805         default:
1806                 return -EOPNOTSUPP;
1807         }
1808 }