/* drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c */
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_tc.h"
#include "bnxt_vfr.h"

#define BNXT_FID_INVALID                        0xffff
#define VLAN_TCI(vid, prio)     ((vid) | ((prio) << VLAN_PRIO_SHIFT))

#define is_vlan_pcp_wildcarded(vlan_tci_mask)   \
        ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == 0x0000)
#define is_vlan_pcp_exactmatch(vlan_tci_mask)   \
        ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == VLAN_PRIO_MASK)
#define is_vlan_pcp_zero(vlan_tci)      \
        ((ntohs(vlan_tci) & VLAN_PRIO_MASK) == 0x0000)
#define is_vid_exactmatch(vlan_tci_mask)        \
        ((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK)

/* Return the dst fid of the func for flow forwarding
 * For PFs: src_fid is the fid of the PF
 * For VF-reps: src_fid is the fid of the VF
 */
static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
{
        struct bnxt *bp;

        /* check if dev belongs to the same switch */
        if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) {
                netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
                            dev->ifindex);
                return BNXT_FID_INVALID;
        }

        /* Is dev a VF-rep? */
        if (bnxt_dev_is_vf_rep(dev))
                return bnxt_vf_rep_get_fid(dev);

        bp = netdev_priv(dev);
        return bp->pf.fw_fid;
}

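/* Parse a tc mirred redirect action: record the destination netdev so
 * that its dst fid can be resolved after all actions have been parsed.
 */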
static int bnxt_tc_parse_redir(struct bnxt *bp,
                               struct bnxt_tc_actions *actions,
                               const struct tc_action *tc_act)
{
        struct net_device *dev = tcf_mirred_dev(tc_act);

        if (!dev) {
                netdev_info(bp->dev, "no dev in mirred action");
                return -EINVAL;
        }

        actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
        actions->dst_dev = dev;
        return 0;
}

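/* Parse a tc vlan push/pop action. For a push, only the VID and TPID are
 * recorded (the priority bits of the new tag are left at zero).
 */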
static void bnxt_tc_parse_vlan(struct bnxt *bp,
                               struct bnxt_tc_actions *actions,
                               const struct tc_action *tc_act)
{
        if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
                actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
        } else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
                actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
                actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
                actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
        }
}

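/* Parse a tc tunnel_key set action and record the encap key.
 * Only IPv4 tunnel endpoints are supported.
 */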
static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
                                    struct bnxt_tc_actions *actions,
                                    const struct tc_action *tc_act)
{
        struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act);
        struct ip_tunnel_key *tun_key = &tun_info->key;

        if (ip_tunnel_info_af(tun_info) != AF_INET) {
                netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
                return -EOPNOTSUPP;
        }

        actions->tun_encap_key = *tun_key;
        actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP;
        return 0;
}

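/* Walk the tc action list and translate it into bnxt_tc_actions.
 * A drop action short-circuits everything else; redirect, vlan push/pop
 * and tunnel encap/decap actions are accumulated into actions->flags.
 * For a forward action, the dst fid is resolved here (the PF's own fid
 * when the packet is first encapsulated).
 */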
static int bnxt_tc_parse_actions(struct bnxt *bp,
                                 struct bnxt_tc_actions *actions,
                                 struct tcf_exts *tc_exts)
{
        const struct tc_action *tc_act;
        LIST_HEAD(tc_actions);
        int rc;

        if (!tcf_exts_has_actions(tc_exts)) {
                netdev_info(bp->dev, "no actions");
                return -EINVAL;
        }

        tcf_exts_to_list(tc_exts, &tc_actions);
        list_for_each_entry(tc_act, &tc_actions, list) {
                /* Drop action */
                if (is_tcf_gact_shot(tc_act)) {
                        actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
                        return 0; /* don't bother with other actions */
                }

                /* Redirect action */
                if (is_tcf_mirred_egress_redirect(tc_act)) {
                        rc = bnxt_tc_parse_redir(bp, actions, tc_act);
                        if (rc)
                                return rc;
                        continue;
                }

                /* Push/pop VLAN */
                if (is_tcf_vlan(tc_act)) {
                        bnxt_tc_parse_vlan(bp, actions, tc_act);
                        continue;
                }

                /* Tunnel encap */
                if (is_tcf_tunnel_set(tc_act)) {
                        rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act);
                        if (rc)
                                return rc;
                        continue;
                }

                /* Tunnel decap */
                if (is_tcf_tunnel_release(tc_act)) {
                        actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
                        continue;
                }
        }

        if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
                if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
                        /* dst_fid is PF's fid */
                        actions->dst_fid = bp->pf.fw_fid;
                } else {
                        /* find the FID from dst_dev */
                        actions->dst_fid =
                                bnxt_flow_get_dst_fid(bp, actions->dst_dev);
                        if (actions->dst_fid == BNXT_FID_INVALID)
                                return -EINVAL;
                }
        }

        return 0;
}

#define GET_KEY(flow_cmd, key_type)                                     \
                skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
                                          (flow_cmd)->key)
#define GET_MASK(flow_cmd, key_type)                                    \
                skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
                                          (flow_cmd)->mask)

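/* Convert a TC flower match (dissector keys/masks) into a bnxt_tc_flow.
 * For example, a rule such as:
 *   tc filter add dev <pf> parent ffff: protocol ip flower \
 *      ip_proto tcp dst_port 80 action mirred egress redirect dev <vf-rep>
 * fills in the L2/L3/L4 key and mask fields below before the actions
 * are parsed.
 */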
static int bnxt_tc_parse_flow(struct bnxt *bp,
                              struct tc_cls_flower_offload *tc_flow_cmd,
                              struct bnxt_tc_flow *flow)
{
        struct flow_dissector *dissector = tc_flow_cmd->dissector;
        u16 addr_type = 0;

        /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
        if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
            (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
                netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
                            dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_dissector_key_control *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL);

                addr_type = key->addr_type;
        }

        if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_dissector_key_basic *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
                struct flow_dissector_key_basic *mask =
                        GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);

                flow->l2_key.ether_type = key->n_proto;
                flow->l2_mask.ether_type = mask->n_proto;

                if (key->n_proto == htons(ETH_P_IP) ||
                    key->n_proto == htons(ETH_P_IPV6)) {
                        flow->l4_key.ip_proto = key->ip_proto;
                        flow->l4_mask.ip_proto = mask->ip_proto;
                }
        }

        if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_dissector_key_eth_addrs *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
                struct flow_dissector_key_eth_addrs *mask =
                        GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);

                flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
                ether_addr_copy(flow->l2_key.dmac, key->dst);
                ether_addr_copy(flow->l2_mask.dmac, mask->dst);
                ether_addr_copy(flow->l2_key.smac, key->src);
                ether_addr_copy(flow->l2_mask.smac, mask->src);
        }

        if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_dissector_key_vlan *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
                struct flow_dissector_key_vlan *mask =
                        GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);

                flow->l2_key.inner_vlan_tci =
                   cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
                flow->l2_mask.inner_vlan_tci =
                   cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority)));
                flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
                flow->l2_mask.inner_vlan_tpid = htons(0xffff);
                flow->l2_key.num_vlans = 1;
        }

        if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
                struct flow_dissector_key_ipv4_addrs *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
                struct flow_dissector_key_ipv4_addrs *mask =
                        GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);

                flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
                flow->l3_key.ipv4.daddr.s_addr = key->dst;
                flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
                flow->l3_key.ipv4.saddr.s_addr = key->src;
                flow->l3_mask.ipv4.saddr.s_addr = mask->src;
        } else if (dissector_uses_key(dissector,
                                      FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
                struct flow_dissector_key_ipv6_addrs *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
                struct flow_dissector_key_ipv6_addrs *mask =
                        GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);

                flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
                flow->l3_key.ipv6.daddr = key->dst;
                flow->l3_mask.ipv6.daddr = mask->dst;
                flow->l3_key.ipv6.saddr = key->src;
                flow->l3_mask.ipv6.saddr = mask->src;
        }

        if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_dissector_key_ports *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
                struct flow_dissector_key_ports *mask =
                        GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);

                flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
                flow->l4_key.ports.dport = key->dst;
                flow->l4_mask.ports.dport = mask->dst;
                flow->l4_key.ports.sport = key->src;
                flow->l4_mask.ports.sport = mask->src;
        }

        if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
                struct flow_dissector_key_icmp *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
                struct flow_dissector_key_icmp *mask =
                        GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);

                flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
                flow->l4_key.icmp.type = key->type;
                flow->l4_key.icmp.code = key->code;
                flow->l4_mask.icmp.type = mask->type;
                flow->l4_mask.icmp.code = mask->code;
        }

        if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
                struct flow_dissector_key_control *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL);

                addr_type = key->addr_type;
        }

        if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
                struct flow_dissector_key_ipv4_addrs *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
                struct flow_dissector_key_ipv4_addrs *mask =
                                GET_MASK(tc_flow_cmd,
                                         FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);

                flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
                flow->tun_key.u.ipv4.dst = key->dst;
                flow->tun_mask.u.ipv4.dst = mask->dst;
                flow->tun_key.u.ipv4.src = key->src;
                flow->tun_mask.u.ipv4.src = mask->src;
        } else if (dissector_uses_key(dissector,
                                      FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
                return -EOPNOTSUPP;
        }

        if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_dissector_key_keyid *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
                struct flow_dissector_key_keyid *mask =
                        GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);

                flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
                flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid);
                flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid);
        }

        if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
                struct flow_dissector_key_ports *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
                struct flow_dissector_key_ports *mask =
                        GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);

                flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
                flow->tun_key.tp_dst = key->dst;
                flow->tun_mask.tp_dst = mask->dst;
                flow->tun_key.tp_src = key->src;
                flow->tun_mask.tp_src = mask->src;
        }

        return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
}

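/* Free a flow in FW via the HWRM_CFA_FLOW_FREE cmd */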
static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
{
        struct hwrm_cfa_flow_free_input req = { 0 };
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
        req.flow_handle = flow_handle;

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
                            __func__, flow_handle, rc);

        if (rc)
                rc = -EIO;
        return rc;
}

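/* Compute the prefix length of an IPv6 mask as the sum of the mask
 * lengths of its four 32-bit words.
 */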
static int ipv6_mask_len(struct in6_addr *mask)
{
        int mask_len = 0, i;

        for (i = 0; i < 4; i++)
                mask_len += inet_mask_len(mask->s6_addr32[i]);

        return mask_len;
}

static bool is_wildcard(void *mask, int len)
{
        const u8 *p = mask;
        int i;

        for (i = 0; i < len; i++) {
                if (p[i] != 0)
                        return false;
        }
        return true;
}

static bool is_exactmatch(void *mask, int len)
{
        const u8 *p = mask;
        int i;

        for (i = 0; i < len; i++)
                if (p[i] != 0xff)
                        return false;

        return true;
}

static bool is_vlan_tci_allowed(__be16  vlan_tci_mask,
                                __be16  vlan_tci)
{
        /* VLAN priority must be either exactly zero or fully wildcarded and
         * VLAN id must be exact match.
         */
        if (is_vid_exactmatch(vlan_tci_mask) &&
            ((is_vlan_pcp_exactmatch(vlan_tci_mask) &&
              is_vlan_pcp_zero(vlan_tci)) ||
             is_vlan_pcp_wildcarded(vlan_tci_mask)))
                return true;

        return false;
}

static bool bits_set(void *key, int len)
{
        const u8 *p = key;
        int i;

        for (i = 0; i < len; i++)
                if (p[i] != 0)
                        return true;

        return false;
}

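/* Build and send the HWRM_CFA_FLOW_ALLOC cmd for a parsed flow. On
 * success, the FW-assigned flow_handle is returned; it is used later as
 * a ref_flow_handle for flows sharing the same L2 key and for freeing
 * the flow.
 */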
static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
                                    __le16 ref_flow_handle,
                                    __le32 tunnel_handle, __le16 *flow_handle)
{
        struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_tc_actions *actions = &flow->actions;
        struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
        struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
        struct hwrm_cfa_flow_alloc_input req = { 0 };
        u16 flow_flags = 0, action_flags = 0;
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);

        req.src_fid = cpu_to_le16(flow->src_fid);
        req.ref_flow_handle = ref_flow_handle;

        if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
            actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
                req.tunnel_handle = tunnel_handle;
                flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL;
                action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL;
        }

        req.ethertype = flow->l2_key.ether_type;
        req.ip_proto = flow->l4_key.ip_proto;

        if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
                memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
                memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
        }

        if (flow->l2_key.num_vlans > 0) {
                flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE;
                /* FW expects the inner_vlan_tci value to be set
                 * in outer_vlan_tci when num_vlans is 1 (which is
                 * always the case in TC.)
                 */
                req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
        }

        /* If all IP and L4 fields are wildcarded then this is an L2 flow */
        if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
            is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
                flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
        } else {
                flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
                                CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 :
                                CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;

                if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
                        req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
                        req.ip_dst_mask_len =
                                inet_mask_len(l3_mask->ipv4.daddr.s_addr);
                        req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
                        req.ip_src_mask_len =
                                inet_mask_len(l3_mask->ipv4.saddr.s_addr);
                } else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
                        memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
                               sizeof(req.ip_dst));
                        req.ip_dst_mask_len =
                                        ipv6_mask_len(&l3_mask->ipv6.daddr);
                        memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
                               sizeof(req.ip_src));
                        req.ip_src_mask_len =
                                        ipv6_mask_len(&l3_mask->ipv6.saddr);
                }
        }

        if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
                req.l4_src_port = flow->l4_key.ports.sport;
                req.l4_src_port_mask = flow->l4_mask.ports.sport;
                req.l4_dst_port = flow->l4_key.ports.dport;
                req.l4_dst_port_mask = flow->l4_mask.ports.dport;
        } else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
                /* l4 ports serve as type/code when ip_proto is ICMP */
                req.l4_src_port = htons(flow->l4_key.icmp.type);
                req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
                req.l4_dst_port = htons(flow->l4_key.icmp.code);
                req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
        }
        req.flags = cpu_to_le16(flow_flags);

        if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
                action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
        } else {
                if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
                        action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
                        req.dst_fid = cpu_to_le16(actions->dst_fid);
                }
                if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
                        action_flags |=
                            CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
                        req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
                        req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
                        memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
                        memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
                }
                if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
                        action_flags |=
                            CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
                        /* Rewrite config with tpid = 0 implies vlan pop */
                        req.l2_rewrite_vlan_tpid = 0;
                        memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
                        memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
                }
        }
        req.action_flags = cpu_to_le16(action_flags);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc)
                *flow_handle = resp->flow_handle;
        mutex_unlock(&bp->hwrm_cmd_lock);

        if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
                rc = -ENOSPC;
        else if (rc)
                rc = -EIO;
        return rc;
}

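/* Allocate a VXLAN decap filter in FW that matches the outer tunnel
 * headers (VNI, dst MAC, outer IPs and dst port) of a decap flow.
 */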
static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
                                       struct bnxt_tc_flow *flow,
                                       struct bnxt_tc_l2_key *l2_info,
                                       __le32 ref_decap_handle,
                                       __le32 *decap_filter_handle)
{
        struct hwrm_cfa_decap_filter_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;
        struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
        struct ip_tunnel_key *tun_key = &flow->tun_key;
        u32 enables = 0;
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);

        req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
        enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
                   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
        req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
        req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;

        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
                /* tunnel_id is wrongly defined in hsi defn. as __le32 */
                req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
        }

        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
                ether_addr_copy(req.dst_macaddr, l2_info->dmac);
        }
        if (l2_info->num_vlans) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
                req.t_ivlan_vid = l2_info->inner_vlan_tci;
        }

        enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
        req.ethertype = htons(ETH_P_IP);

        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
                           CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
                           CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
                req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
                req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
                req.src_ipaddr[0] = tun_key->u.ipv4.src;
        }

        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
                req.dst_port = tun_key->tp_dst;
        }

        /* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc
         * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16.
         */
        req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
        req.enables = cpu_to_le32(enables);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc)
                *decap_filter_handle = resp->decap_filter_id;
        else
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
        mutex_unlock(&bp->hwrm_cmd_lock);

        if (rc)
                rc = -EIO;
        return rc;
}

static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
                                      __le32 decap_filter_handle)
{
        struct hwrm_cfa_decap_filter_free_input req = { 0 };
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
        req.decap_filter_id = decap_filter_handle;

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);

        if (rc)
                rc = -EIO;
        return rc;
}

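/* Allocate a VXLAN encap record in FW describing the outer L2/IPv4/UDP
 * headers to be prepended to packets of an encap flow.
 */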
static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
                                       struct ip_tunnel_key *encap_key,
                                       struct bnxt_tc_l2_key *l2_info,
                                       __le32 *encap_record_handle)
{
        struct hwrm_cfa_encap_record_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;
        struct hwrm_cfa_encap_record_alloc_input req = { 0 };
        struct hwrm_cfa_encap_data_vxlan *encap =
                        (struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
        struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
                                (struct hwrm_vxlan_ipv4_hdr *)encap->l3;
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);

        req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;

        ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
        ether_addr_copy(encap->src_mac_addr, l2_info->smac);
        if (l2_info->num_vlans) {
                encap->num_vlan_tags = l2_info->num_vlans;
                encap->ovlan_tci = l2_info->inner_vlan_tci;
                encap->ovlan_tpid = l2_info->inner_vlan_tpid;
        }

        encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
        encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
        encap_ipv4->ttl = encap_key->ttl;

        encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst;
        encap_ipv4->src_ip_addr = encap_key->u.ipv4.src;
        encap_ipv4->protocol = IPPROTO_UDP;

        encap->dst_port = encap_key->tp_dst;
        encap->vni = tunnel_id_to_key32(encap_key->tun_id);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc)
                *encap_record_handle = resp->encap_record_id;
        else
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
        mutex_unlock(&bp->hwrm_cmd_lock);

        if (rc)
                rc = -EIO;
        return rc;
}

static int hwrm_cfa_encap_record_free(struct bnxt *bp,
                                      __le32 encap_record_handle)
{
        struct hwrm_cfa_encap_record_free_input req = { 0 };
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
        req.encap_record_id = encap_record_handle;

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);

        if (rc)
                rc = -EIO;
        return rc;
}

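/* Drop a flow's reference on its shared L2 node and free the node from
 * the L2 hash table when the last reference is gone.
 */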
static int bnxt_tc_put_l2_node(struct bnxt *bp,
                               struct bnxt_tc_flow_node *flow_node)
{
        struct bnxt_tc_l2_node *l2_node = flow_node->l2_node;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        int rc;

        /* remove flow_node from the L2 shared flow list */
        list_del(&flow_node->l2_list_node);
        if (--l2_node->refcount == 0) {
                rc =  rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node,
                                             tc_info->l2_ht_params);
                if (rc)
                        netdev_err(bp->dev,
                                   "Error: %s: rhashtable_remove_fast: %d",
                                   __func__, rc);
                kfree_rcu(l2_node, rcu);
        }
        return 0;
}

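/* Look up the L2 node matching l2_key in the given hash table, or
 * allocate and insert a new one. Returns NULL on alloc/insert failure.
 */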
static struct bnxt_tc_l2_node *
bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
                    struct rhashtable_params ht_params,
                    struct bnxt_tc_l2_key *l2_key)
{
        struct bnxt_tc_l2_node *l2_node;
        int rc;

        l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params);
        if (!l2_node) {
                l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL);
                if (!l2_node) {
                        rc = -ENOMEM;
                        return NULL;
                }

                l2_node->key = *l2_key;
                rc = rhashtable_insert_fast(l2_table, &l2_node->node,
                                            ht_params);
                if (rc) {
                        kfree_rcu(l2_node, rcu);
                        netdev_err(bp->dev,
                                   "Error: %s: rhashtable_insert_fast: %d",
                                   __func__, rc);
                        return NULL;
                }
                INIT_LIST_HEAD(&l2_node->common_l2_flows);
        }
        return l2_node;
}

/* Get the ref_flow_handle for a flow by checking if there are any other
 * flows that share the same L2 key as this flow.
 */
static int
bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
                            struct bnxt_tc_flow_node *flow_node,
                            __le16 *ref_flow_handle)
{
        struct bnxt_tc_info *tc_info = bp->tc_info;
        struct bnxt_tc_flow_node *ref_flow_node;
        struct bnxt_tc_l2_node *l2_node;

        l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table,
                                      tc_info->l2_ht_params,
                                      &flow->l2_key);
        if (!l2_node)
                return -1;

        /* If any other flow is using this l2_node, use its flow_handle
         * as the ref_flow_handle
         */
        if (l2_node->refcount > 0) {
                ref_flow_node = list_first_entry(&l2_node->common_l2_flows,
                                                 struct bnxt_tc_flow_node,
                                                 l2_list_node);
                *ref_flow_handle = ref_flow_node->flow_handle;
        } else {
                *ref_flow_handle = cpu_to_le16(0xffff);
        }

        /* Insert the l2_node into the flow_node so that subsequent flows
         * with a matching l2 key can use the flow_handle of this flow
         * as their ref_flow_handle
         */
        flow_node->l2_node = l2_node;
        list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows);
        l2_node->refcount++;
        return 0;
}

/* After the flow parsing is done, this routine is used for checking
 * if there are any aspects of the flow that prevent it from being
 * offloaded.
 */
static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
{
        /* If L4 ports are specified then ip_proto must be TCP or UDP */
        if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
            (flow->l4_key.ip_proto != IPPROTO_TCP &&
             flow->l4_key.ip_proto != IPPROTO_UDP)) {
                netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports",
                            flow->l4_key.ip_proto);
                return false;
        }

        /* Currently source/dest MAC cannot be partial wildcard */
        if (bits_set(&flow->l2_key.smac, sizeof(flow->l2_key.smac)) &&
            !is_exactmatch(flow->l2_mask.smac, sizeof(flow->l2_mask.smac))) {
                netdev_info(bp->dev, "Wildcard match unsupported for Source MAC\n");
                return false;
        }
        if (bits_set(&flow->l2_key.dmac, sizeof(flow->l2_key.dmac)) &&
            !is_exactmatch(&flow->l2_mask.dmac, sizeof(flow->l2_mask.dmac))) {
                netdev_info(bp->dev, "Wildcard match unsupported for Dest MAC\n");
                return false;
        }

        /* Currently VLAN fields cannot be partial wildcard */
        if (bits_set(&flow->l2_key.inner_vlan_tci,
                     sizeof(flow->l2_key.inner_vlan_tci)) &&
            !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci,
                                 flow->l2_key.inner_vlan_tci)) {
                netdev_info(bp->dev, "Unsupported VLAN TCI\n");
                return false;
        }
        if (bits_set(&flow->l2_key.inner_vlan_tpid,
                     sizeof(flow->l2_key.inner_vlan_tpid)) &&
            !is_exactmatch(&flow->l2_mask.inner_vlan_tpid,
                           sizeof(flow->l2_mask.inner_vlan_tpid))) {
                netdev_info(bp->dev, "Wildcard match unsupported for VLAN TPID\n");
                return false;
        }

        /* Currently Ethertype must be set */
        if (!is_exactmatch(&flow->l2_mask.ether_type,
                           sizeof(flow->l2_mask.ether_type))) {
                netdev_info(bp->dev, "Wildcard match unsupported for Ethertype\n");
                return false;
        }

        return true;
}

/* Returns the final refcount of the node on success
 * or a negative error code on failure
 */
static int bnxt_tc_put_tunnel_node(struct bnxt *bp,
                                   struct rhashtable *tunnel_table,
                                   struct rhashtable_params *ht_params,
                                   struct bnxt_tc_tunnel_node *tunnel_node)
{
        int rc;

        if (--tunnel_node->refcount == 0) {
                rc =  rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
                                             *ht_params);
                if (rc) {
                        netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
                        rc = -1;
                }
                kfree_rcu(tunnel_node, rcu);
                return rc;
        } else {
                return tunnel_node->refcount;
        }
}

/* Get (or add) either encap or decap tunnel node from/to the supplied
 * hash table.
 */
static struct bnxt_tc_tunnel_node *
bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table,
                        struct rhashtable_params *ht_params,
                        struct ip_tunnel_key *tun_key)
{
        struct bnxt_tc_tunnel_node *tunnel_node;
        int rc;

        tunnel_node = rhashtable_lookup_fast(tunnel_table, tun_key, *ht_params);
        if (!tunnel_node) {
                tunnel_node = kzalloc(sizeof(*tunnel_node), GFP_KERNEL);
                if (!tunnel_node) {
                        rc = -ENOMEM;
                        goto err;
                }

                tunnel_node->key = *tun_key;
                tunnel_node->tunnel_handle = INVALID_TUNNEL_HANDLE;
                rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node,
                                            *ht_params);
                if (rc) {
                        kfree_rcu(tunnel_node, rcu);
                        goto err;
                }
        }
        tunnel_node->refcount++;
        return tunnel_node;
err:
        netdev_info(bp->dev, "error rc=%d", rc);
        return NULL;
}

static int bnxt_tc_get_ref_decap_handle(struct bnxt *bp,
                                        struct bnxt_tc_flow *flow,
                                        struct bnxt_tc_l2_key *l2_key,
                                        struct bnxt_tc_flow_node *flow_node,
                                        __le32 *ref_decap_handle)
{
        struct bnxt_tc_info *tc_info = bp->tc_info;
        struct bnxt_tc_flow_node *ref_flow_node;
        struct bnxt_tc_l2_node *decap_l2_node;

        decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table,
                                            tc_info->decap_l2_ht_params,
                                            l2_key);
        if (!decap_l2_node)
                return -1;

        /* If any other flow is using this decap_l2_node, use its decap_handle
         * as the ref_decap_handle
         */
        if (decap_l2_node->refcount > 0) {
                ref_flow_node =
                        list_first_entry(&decap_l2_node->common_l2_flows,
                                         struct bnxt_tc_flow_node,
                                         decap_l2_list_node);
                *ref_decap_handle = ref_flow_node->decap_node->tunnel_handle;
        } else {
                *ref_decap_handle = INVALID_TUNNEL_HANDLE;
        }

        /* Insert the l2_node into the flow_node so that subsequent flows
         * with a matching decap l2 key can use the decap_filter_handle of
         * this flow as their ref_decap_handle
         */
        flow_node->decap_l2_node = decap_l2_node;
        list_add(&flow_node->decap_l2_list_node,
                 &decap_l2_node->common_l2_flows);
        decap_l2_node->refcount++;
        return 0;
}

static void bnxt_tc_put_decap_l2_node(struct bnxt *bp,
                                      struct bnxt_tc_flow_node *flow_node)
{
        struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        int rc;

        /* remove flow_node from the decap L2 sharing flow list */
        list_del(&flow_node->decap_l2_list_node);
        if (--decap_l2_node->refcount == 0) {
                rc =  rhashtable_remove_fast(&tc_info->decap_l2_table,
                                             &decap_l2_node->node,
                                             tc_info->decap_l2_ht_params);
                if (rc)
                        netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
                kfree_rcu(decap_l2_node, rcu);
        }
}

static void bnxt_tc_put_decap_handle(struct bnxt *bp,
                                     struct bnxt_tc_flow_node *flow_node)
{
        __le32 decap_handle = flow_node->decap_node->tunnel_handle;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        int rc;

        if (flow_node->decap_l2_node)
                bnxt_tc_put_decap_l2_node(bp, flow_node);

        rc = bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
                                     &tc_info->decap_ht_params,
                                     flow_node->decap_node);
        if (!rc && decap_handle != INVALID_TUNNEL_HANDLE)
                hwrm_cfa_decap_filter_free(bp, decap_handle);
}

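/* Resolve the outer L2 header for a tunnel: route the tunnel dst IP,
 * verify that the egress device is (or is stacked on) the PF netdev,
 * and snapshot the next-hop neighbour's MAC address into l2_info.
 */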
static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
                                       struct ip_tunnel_key *tun_key,
                                       struct bnxt_tc_l2_key *l2_info)
{
#ifdef CONFIG_INET
        struct net_device *real_dst_dev = bp->dev;
        struct flowi4 flow = { {0} };
        struct net_device *dst_dev;
        struct neighbour *nbr;
        struct rtable *rt;
        int rc;

        flow.flowi4_proto = IPPROTO_UDP;
        flow.fl4_dport = tun_key->tp_dst;
        flow.daddr = tun_key->u.ipv4.dst;

        rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
        if (IS_ERR(rt)) {
                netdev_info(bp->dev, "no route to %pI4b", &flow.daddr);
                return -EOPNOTSUPP;
        }

        /* The route must either point to the real_dst_dev or a dst_dev that
         * uses the real_dst_dev.
         */
        dst_dev = rt->dst.dev;
        if (is_vlan_dev(dst_dev)) {
#if IS_ENABLED(CONFIG_VLAN_8021Q)
                struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev);

                if (vlan->real_dev != real_dst_dev) {
                        netdev_info(bp->dev,
                                    "dst_dev(%s) doesn't use PF-if(%s)",
                                    netdev_name(dst_dev),
                                    netdev_name(real_dst_dev));
                        rc = -EOPNOTSUPP;
                        goto put_rt;
                }
                l2_info->inner_vlan_tci = htons(vlan->vlan_id);
                l2_info->inner_vlan_tpid = vlan->vlan_proto;
                l2_info->num_vlans = 1;
#endif
        } else if (dst_dev != real_dst_dev) {
                netdev_info(bp->dev,
                            "dst_dev(%s) for %pI4b is not PF-if(%s)",
                            netdev_name(dst_dev), &flow.daddr,
                            netdev_name(real_dst_dev));
                rc = -EOPNOTSUPP;
                goto put_rt;
        }

        nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
        if (!nbr) {
                netdev_info(bp->dev, "can't lookup neighbor for %pI4b",
                            &flow.daddr);
                rc = -EOPNOTSUPP;
                goto put_rt;
        }

        tun_key->u.ipv4.src = flow.saddr;
        tun_key->ttl = ip4_dst_hoplimit(&rt->dst);
        neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev);
        ether_addr_copy(l2_info->smac, dst_dev->dev_addr);
        neigh_release(nbr);
        ip_rt_put(rt);

        return 0;
put_rt:
        ip_rt_put(rt);
        return rc;
#else
        return -EOPNOTSUPP;
#endif
}

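/* Get (or create) the decap filter handle for a flow, sharing the
 * handle with other flows that decap from the same tunnel endpoint.
 */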
static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
                                    struct bnxt_tc_flow_node *flow_node,
                                    __le32 *decap_filter_handle)
{
        struct ip_tunnel_key *decap_key = &flow->tun_key;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        struct bnxt_tc_l2_key l2_info = { {0} };
        struct bnxt_tc_tunnel_node *decap_node;
        struct ip_tunnel_key tun_key = { 0 };
        struct bnxt_tc_l2_key *decap_l2_info;
        __le32 ref_decap_handle;
        int rc;

        /* Check if there's another flow using the same tunnel decap.
         * If not, add this tunnel to the table and resolve the other
         * tunnel header fields. Ignore src_port in the tunnel_key,
         * since it is not required for decap filters.
         */
        decap_key->tp_src = 0;
        decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
                                             &tc_info->decap_ht_params,
                                             decap_key);
        if (!decap_node)
                return -ENOMEM;

        flow_node->decap_node = decap_node;

        if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
                goto done;

        /* Resolve the L2 fields for tunnel decap
         * Resolve the route for the remote vtep (saddr) of the decap key
         * and find its next-hop MAC addresses
         */
        tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
        tun_key.tp_dst = flow->tun_key.tp_dst;
        rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);
        if (rc)
                goto put_decap;

        decap_l2_info = &decap_node->l2_info;
        /* decap smac is wildcarded */
        ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
        if (l2_info.num_vlans) {
                decap_l2_info->num_vlans = l2_info.num_vlans;
                decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
                decap_l2_info->inner_vlan_tci = l2_info.inner_vlan_tci;
        }
        flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS;

        /* For getting a decap_filter_handle we first need to check if
         * there are any other decap flows that share the same tunnel L2
         * key and if so, pass that flow's decap_filter_handle as the
         * ref_decap_handle for this flow.
         */
        rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node,
                                          &ref_decap_handle);
        if (rc)
                goto put_decap;

        /* Issue the hwrm cmd to allocate a decap filter handle */
        rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info,
                                         ref_decap_handle,
                                         &decap_node->tunnel_handle);
        if (rc)
                goto put_decap_l2;

done:
        *decap_filter_handle = decap_node->tunnel_handle;
        return 0;

put_decap_l2:
        bnxt_tc_put_decap_l2_node(bp, flow_node);
put_decap:
        bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
                                &tc_info->decap_ht_params,
                                flow_node->decap_node);
        return rc;
}

static void bnxt_tc_put_encap_handle(struct bnxt *bp,
                                     struct bnxt_tc_tunnel_node *encap_node)
{
        __le32 encap_handle = encap_node->tunnel_handle;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        int rc;

        rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
                                     &tc_info->encap_ht_params, encap_node);
        if (!rc && encap_handle != INVALID_TUNNEL_HANDLE)
                hwrm_cfa_encap_record_free(bp, encap_handle);
}

/* Lookup the tunnel encap table and check if there's an encap_handle
 * alloc'd already.
 * If not, query L2 info via a route lookup and issue an encap_record_alloc
 * cmd to FW.
 */
static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
                                    struct bnxt_tc_flow_node *flow_node,
                                    __le32 *encap_handle)
{
        struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        struct bnxt_tc_tunnel_node *encap_node;
        int rc;

        /* Check if there's another flow using the same tunnel encap.
         * If not, add this tunnel to the table and resolve the other
         * tunnel header fields
         */
        encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
                                             &tc_info->encap_ht_params,
                                             encap_key);
        if (!encap_node)
                return -ENOMEM;

        flow_node->encap_node = encap_node;

        if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
                goto done;

        rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
        if (rc)
                goto put_encap;

        /* Allocate a new tunnel encap record */
        rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info,
                                         &encap_node->tunnel_handle);
        if (rc)
                goto put_encap;

done:
        *encap_handle = encap_node->tunnel_handle;
        return 0;

put_encap:
        bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
                                &tc_info->encap_ht_params, encap_node);
        return rc;
}

static void bnxt_tc_put_tunnel_handle(struct bnxt *bp,
                                      struct bnxt_tc_flow *flow,
                                      struct bnxt_tc_flow_node *flow_node)
{
        if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
                bnxt_tc_put_decap_handle(bp, flow_node);
        else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
                bnxt_tc_put_encap_handle(bp, flow_node->encap_node);
}

static int bnxt_tc_get_tunnel_handle(struct bnxt *bp,
                                     struct bnxt_tc_flow *flow,
                                     struct bnxt_tc_flow_node *flow_node,
                                     __le32 *tunnel_handle)
{
        if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
                return bnxt_tc_get_decap_handle(bp, flow, flow_node,
                                                tunnel_handle);
        else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
                return bnxt_tc_get_encap_handle(bp, flow, flow_node,
                                                tunnel_handle);
        else
                return 0;
}
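
/* Tear down an offloaded flow: free the FW flow, release tunnel and L2
 * references under tc_info->lock, then unlink the node from the flow
 * table and free it.
 */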
static int __bnxt_tc_del_flow(struct bnxt *bp,
                              struct bnxt_tc_flow_node *flow_node)
{
        struct bnxt_tc_info *tc_info = bp->tc_info;
        int rc;

        /* send HWRM cmd to free the flow-id */
        bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle);

        mutex_lock(&tc_info->lock);

        /* release references to any tunnel encap/decap nodes */
        bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node);

        /* release reference to l2 node */
        bnxt_tc_put_l2_node(bp, flow_node);

        mutex_unlock(&tc_info->lock);

        rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
                                    tc_info->flow_ht_params);
        if (rc)
                netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d",
                           __func__, rc);

        kfree_rcu(flow_node, rcu);
        return 0;
}

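/* For decap flows the packet arrives on the PF uplink, so use the PF's
 * fw_fid as src_fid; otherwise use the fid of the originating function.
 */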
static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
                                u16 src_fid)
{
        if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
                flow->src_fid = bp->pf.fw_fid;
        else
                flow->src_fid = src_fid;
}

/* Add a new flow or replace an existing flow.
 * Notes on locking:
 * There are essentially two critical sections here.
 * 1. while adding a new flow
 *    a) lookup l2-key
 *    b) issue HWRM cmd and get flow_handle
 *    c) link l2-key with flow
 * 2. while deleting a flow
 *    a) unlinking l2-key from flow
 * A lock is needed to protect these two critical sections.
 *
 * The hash-tables are already protected by the rhashtable API.
 */
static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
                            struct tc_cls_flower_offload *tc_flow_cmd)
{
        struct bnxt_tc_flow_node *new_node, *old_node;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        struct bnxt_tc_flow *flow;
        __le32 tunnel_handle = 0;
        __le16 ref_flow_handle;
        int rc;

        /* allocate memory for the new flow and its node */
        new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
        if (!new_node) {
                rc = -ENOMEM;
                goto done;
        }
        new_node->cookie = tc_flow_cmd->cookie;
        flow = &new_node->flow;

        rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
        if (rc)
                goto free_node;

        bnxt_tc_set_src_fid(bp, flow, src_fid);

        if (!bnxt_tc_can_offload(bp, flow)) {
                rc = -ENOSPC;
                goto free_node;
        }

        /* If a flow exists with the same cookie, delete it */
        old_node = rhashtable_lookup_fast(&tc_info->flow_table,
                                          &tc_flow_cmd->cookie,
                                          tc_info->flow_ht_params);
        if (old_node)
                __bnxt_tc_del_flow(bp, old_node);

1317         /* Check if the L2 part of the flow has been offloaded already.
1318          * If so, bump up its refcnt and get its reference handle.
1319          */
1320         mutex_lock(&tc_info->lock);
1321         rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle);
1322         if (rc)
1323                 goto unlock;
1324
1325         /* If the flow involves tunnel encap/decap, get tunnel_handle */
1326         rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle);
1327         if (rc)
1328                 goto put_l2;
1329
1330         /* send HWRM cmd to alloc the flow */
1331         rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
1332                                       tunnel_handle, &new_node->flow_handle);
1333         if (rc)
1334                 goto put_tunnel;
1335
1336         flow->lastused = jiffies;
1337         spin_lock_init(&flow->stats_lock);
1338         /* add new flow to flow-table */
1339         rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
1340                                     tc_info->flow_ht_params);
1341         if (rc)
1342                 goto hwrm_flow_free;
1343
1344         mutex_unlock(&tc_info->lock);
1345         return 0;
1346
1347 hwrm_flow_free:
1348         bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
1349 put_tunnel:
1350         bnxt_tc_put_tunnel_handle(bp, flow, new_node);
1351 put_l2:
1352         bnxt_tc_put_l2_node(bp, new_node);
1353 unlock:
1354         mutex_unlock(&tc_info->lock);
1355 free_node:
1356         kfree_rcu(new_node, rcu);
1357 done:
1358         netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d\n",
1359                    __func__, tc_flow_cmd->cookie, rc);
1360         return rc;
1361 }
1362
1363 static int bnxt_tc_del_flow(struct bnxt *bp,
1364                             struct tc_cls_flower_offload *tc_flow_cmd)
1365 {
1366         struct bnxt_tc_info *tc_info = bp->tc_info;
1367         struct bnxt_tc_flow_node *flow_node;
1368
1369         flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
1370                                            &tc_flow_cmd->cookie,
1371                                            tc_info->flow_ht_params);
1372         if (!flow_node)
1373                 return -EINVAL;
1374
1375         return __bnxt_tc_del_flow(bp, flow_node);
1376 }
1377
1378 static int bnxt_tc_get_flow_stats(struct bnxt *bp,
1379                                   struct tc_cls_flower_offload *tc_flow_cmd)
1380 {
1381         struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats;
1382         struct bnxt_tc_info *tc_info = bp->tc_info;
1383         struct bnxt_tc_flow_node *flow_node;
1384         struct bnxt_tc_flow *flow;
1385         unsigned long lastused;
1386
1387         flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
1388                                            &tc_flow_cmd->cookie,
1389                                            tc_info->flow_ht_params);
1390         if (!flow_node)
1391                 return -EINVAL;
1392
1393         flow = &flow_node->flow;
1394         curr_stats = &flow->stats;
1395         prev_stats = &flow->prev_stats;
1396
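        /* Report only the delta since the previous query; the TC core
         * accumulates the increments passed to tcf_exts_stats_update().
         */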
1397         spin_lock(&flow->stats_lock);
1398         stats.packets = curr_stats->packets - prev_stats->packets;
1399         stats.bytes = curr_stats->bytes - prev_stats->bytes;
1400         *prev_stats = *curr_stats;
1401         lastused = flow->lastused;
1402         spin_unlock(&flow->stats_lock);
1403
1404         tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets,
1405                               lastused);
1406         return 0;
1407 }
1408
1409 static int
1410 bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
1411                              struct bnxt_tc_stats_batch stats_batch[])
1412 {
1413         struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
1414         struct hwrm_cfa_flow_stats_input req = { 0 };
1415         __le16 *req_flow_handles = &req.flow_handle_0;
1416         int rc, i;
1417
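        /* The FW request provides space for a batch of flow handles in
         * consecutive flow_handle_* fields; treating them as an array
         * through req_flow_handles lets the loop below fill them in.
         */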
1418         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
1419         req.num_flows = cpu_to_le16(num_flows);
1420         for (i = 0; i < num_flows; i++) {
1421                 struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
1422
1423                 req_flow_handles[i] = flow_node->flow_handle;
1424         }
1425
1426         mutex_lock(&bp->hwrm_cmd_lock);
1427         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1428         if (!rc) {
1429                 __le64 *resp_packets = &resp->packet_0;
1430                 __le64 *resp_bytes = &resp->byte_0;
1431
1432                 for (i = 0; i < num_flows; i++) {
1433                         stats_batch[i].hw_stats.packets =
1434                                                 le64_to_cpu(resp_packets[i]);
1435                         stats_batch[i].hw_stats.bytes =
1436                                                 le64_to_cpu(resp_bytes[i]);
1437                 }
1438         } else {
1439                 netdev_info(bp->dev, "error rc=%d\n", rc);
1440         }
1441         mutex_unlock(&bp->hwrm_cmd_lock);
1442
1443         if (rc)
1444                 rc = -EIO;
1445         return rc;
1446 }
1447
1448 /* Add val to accum while handling a possible wraparound
1449  * of val. Even though val is of type u64, its actual width
1450  * is denoted by mask and it will wrap around beyond that width.
1451  */
1452 static void accumulate_val(u64 *accum, u64 val, u64 mask)
1453 {
1454 #define low_bits(x, mask)               ((x) & (mask))
1455 #define high_bits(x, mask)              ((x) & ~(mask))
1456         bool wrapped = val < low_bits(*accum, mask);
1457
1458         *accum = high_bits(*accum, mask) + val;
1459         if (wrapped)
1460                 *accum += (mask + 1);
1461 }
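
/* Worked example of the wrap-around handling above (numbers are
 * illustrative, assuming an 8-bit counter, i.e. mask == 0xff):
 *   *accum = 0x1f0, new HW reading val = 0x05
 *   low_bits(*accum) = 0xf0; val (0x05) < 0xf0, so the counter wrapped
 *   *accum = high_bits(*accum) + val = 0x100 + 0x05 = 0x105
 *   *accum += mask + 1             = 0x105 + 0x100 = 0x205
 * The HW counter advanced by (0x100 - 0xf0) + 0x05 = 0x15, and
 * 0x1f0 + 0x15 = 0x205 as expected.
 */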
1462
1463 /* The HW counters' width is much smaller than 64 bits.
1464  * Handle a possible wrap-around while updating the stat counters.
1465  */
1466 static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info,
1467                                   struct bnxt_tc_flow_stats *acc_stats,
1468                                   struct bnxt_tc_flow_stats *hw_stats)
1469 {
1470         accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
1471         accumulate_val(&acc_stats->packets, hw_stats->packets,
1472                        tc_info->packets_mask);
1473 }
1474
1475 static int
1476 bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows,
1477                                 struct bnxt_tc_stats_batch stats_batch[])
1478 {
1479         struct bnxt_tc_info *tc_info = bp->tc_info;
1480         int rc, i;
1481
1482         rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch);
1483         if (rc)
1484                 return rc;
1485
1486         for (i = 0; i < num_flows; i++) {
1487                 struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
1488                 struct bnxt_tc_flow *flow = &flow_node->flow;
1489
1490                 spin_lock(&flow->stats_lock);
1491                 bnxt_flow_stats_accum(tc_info, &flow->stats,
1492                                       &stats_batch[i].hw_stats);
1493                 if (flow->stats.packets != flow->prev_stats.packets)
1494                         flow->lastused = jiffies;
1495                 spin_unlock(&flow->stats_lock);
1496         }
1497
1498         return 0;
1499 }
1500
1501 static int
1502 bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
1503                               struct bnxt_tc_stats_batch stats_batch[],
1504                               int *num_flows)
1505 {
1506         struct bnxt_tc_info *tc_info = bp->tc_info;
1507         struct rhashtable_iter *iter = &tc_info->iter;
1508         void *flow_node;
1509         int rc, i;
1510
1511         rhashtable_walk_start(iter);
1512
1513         rc = 0;
1514         for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
1515                 flow_node = rhashtable_walk_next(iter);
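                /* rhashtable_walk_next() returns ERR_PTR(-EAGAIN) if the
                 * table was resized during the walk; in that case restart
                 * this batch from the beginning.
                 */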
1516                 if (IS_ERR(flow_node)) {
1517                         i = 0;
1518                         if (PTR_ERR(flow_node) == -EAGAIN) {
1519                                 continue;
1520                         } else {
1521                                 rc = PTR_ERR(flow_node);
1522                                 goto done;
1523                         }
1524                 }
1525
1526                 /* No more flows */
1527                 if (!flow_node)
1528                         goto done;
1529
1530                 stats_batch[i].flow_node = flow_node;
1531         }
1532 done:
1533         rhashtable_walk_stop(iter);
1534         *num_flows = i;
1535         return rc;
1536 }
1537
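/* Poll the HW counters of all offloaded flows, one batch at a time, and
 * fold each reading into the corresponding per-flow SW accumulator.
 */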
1538 void bnxt_tc_flow_stats_work(struct bnxt *bp)
1539 {
1540         struct bnxt_tc_info *tc_info = bp->tc_info;
1541         int num_flows, rc;
1542
1543         num_flows = atomic_read(&tc_info->flow_table.nelems);
1544         if (!num_flows)
1545                 return;
1546
1547         rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter);
1548
1549         for (;;) {
1550                 rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch,
1551                                                    &num_flows);
1552                 if (rc) {
1553                         if (rc == -EAGAIN)
1554                                 continue;
1555                         break;
1556                 }
1557
1558                 if (!num_flows)
1559                         break;
1560
1561                 bnxt_tc_flow_stats_batch_update(bp, num_flows,
1562                                                 tc_info->stats_batch);
1563         }
1564
1565         rhashtable_walk_exit(&tc_info->iter);
1566 }
1567
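/* Entry point for TC flower offload requests. As an illustration, a rule
 * like the following (device names hypothetical) would arrive here as a
 * TC_CLSFLOWER_REPLACE command:
 *
 *   tc filter add dev pf0vf0 ingress protocol ip flower \
 *      dst_ip 10.0.0.2 action mirred egress redirect dev pf0vf1
 */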
1568 int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
1569                          struct tc_cls_flower_offload *cls_flower)
1570 {
1571         int rc = 0;
1572
1573         switch (cls_flower->command) {
1574         case TC_CLSFLOWER_REPLACE:
1575                 rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
1576                 break;
1577
1578         case TC_CLSFLOWER_DESTROY:
1579                 rc = bnxt_tc_del_flow(bp, cls_flower);
1580                 break;
1581
1582         case TC_CLSFLOWER_STATS:
1583                 rc = bnxt_tc_get_flow_stats(bp, cls_flower);
1584                 break;

        default:
                rc = -EOPNOTSUPP;
                break;
1585         }
1586         return rc;
1587 }
1588
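/* The flow table is keyed by the TC filter cookie, which is what the
 * REPLACE/DESTROY/STATS handlers above look flows up by.
 */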
1589 static const struct rhashtable_params bnxt_tc_flow_ht_params = {
1590         .head_offset = offsetof(struct bnxt_tc_flow_node, node),
1591         .key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
1592         .key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie),
1593         .automatic_shrinking = true
1594 };
1595
1596 static const struct rhashtable_params bnxt_tc_l2_ht_params = {
1597         .head_offset = offsetof(struct bnxt_tc_l2_node, node),
1598         .key_offset = offsetof(struct bnxt_tc_l2_node, key),
1599         .key_len = BNXT_TC_L2_KEY_LEN,
1600         .automatic_shrinking = true
1601 };
1602
1603 static const struct rhashtable_params bnxt_tc_decap_l2_ht_params = {
1604         .head_offset = offsetof(struct bnxt_tc_l2_node, node),
1605         .key_offset = offsetof(struct bnxt_tc_l2_node, key),
1606         .key_len = BNXT_TC_L2_KEY_LEN,
1607         .automatic_shrinking = true
1608 };
1609
1610 static const struct rhashtable_params bnxt_tc_tunnel_ht_params = {
1611         .head_offset = offsetof(struct bnxt_tc_tunnel_node, node),
1612         .key_offset = offsetof(struct bnxt_tc_tunnel_node, key),
1613         .key_len = sizeof(struct ip_tunnel_key),
1614         .automatic_shrinking = true
1615 };
1616
1617 /* convert counter width in bits to a mask */
1618 #define mask(width)             ((u64)~0 >> (64 - (width)))
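/* e.g. mask(36) == 0xFFFFFFFFF and mask(28) == 0xFFFFFFF, matching the
 * 36-bit byte and 28-bit packet counter widths programmed below.
 */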
1619
1620 int bnxt_init_tc(struct bnxt *bp)
1621 {
1622         struct bnxt_tc_info *tc_info;
1623         int rc;
1624
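        /* CFA flow offload requires HWRM spec 1.8.3+ (0x10803 encodes
         * major.minor.update as one byte each).
         */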
1625         if (bp->hwrm_spec_code < 0x10803) {
1626                 netdev_warn(bp->dev,
1627                             "Firmware does not support TC flower offload.\n");
1628                 return -ENOTSUPP;
1629         }
1630
1631         tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL);
1632         if (!tc_info)
1633                 return -ENOMEM;
1634         mutex_init(&tc_info->lock);
1635
1636         /* Counter widths are programmed by FW */
1637         tc_info->bytes_mask = mask(36);
1638         tc_info->packets_mask = mask(28);
1639
1640         tc_info->flow_ht_params = bnxt_tc_flow_ht_params;
1641         rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
1642         if (rc)
1643                 goto free_tc_info;
1644
1645         tc_info->l2_ht_params = bnxt_tc_l2_ht_params;
1646         rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params);
1647         if (rc)
1648                 goto destroy_flow_table;
1649
1650         tc_info->decap_l2_ht_params = bnxt_tc_decap_l2_ht_params;
1651         rc = rhashtable_init(&tc_info->decap_l2_table,
1652                              &tc_info->decap_l2_ht_params);
1653         if (rc)
1654                 goto destroy_l2_table;
1655
1656         tc_info->decap_ht_params = bnxt_tc_tunnel_ht_params;
1657         rc = rhashtable_init(&tc_info->decap_table,
1658                              &tc_info->decap_ht_params);
1659         if (rc)
1660                 goto destroy_decap_l2_table;
1661
1662         tc_info->encap_ht_params = bnxt_tc_tunnel_ht_params;
1663         rc = rhashtable_init(&tc_info->encap_table,
1664                              &tc_info->encap_ht_params);
1665         if (rc)
1666                 goto destroy_decap_table;
1667
1668         tc_info->enabled = true;
1669         bp->dev->hw_features |= NETIF_F_HW_TC;
1670         bp->dev->features |= NETIF_F_HW_TC;
1671         bp->tc_info = tc_info;
1672         return 0;
1673
1674 destroy_decap_table:
1675         rhashtable_destroy(&tc_info->decap_table);
1676 destroy_decap_l2_table:
1677         rhashtable_destroy(&tc_info->decap_l2_table);
1678 destroy_l2_table:
1679         rhashtable_destroy(&tc_info->l2_table);
1680 destroy_flow_table:
1681         rhashtable_destroy(&tc_info->flow_table);
1682 free_tc_info:
1683         kfree(tc_info);
1684         return rc;
1685 }
1686
1687 void bnxt_shutdown_tc(struct bnxt *bp)
1688 {
1689         struct bnxt_tc_info *tc_info = bp->tc_info;
1690
1691         if (!bnxt_tc_flower_enabled(bp))
1692                 return;
1693
1694         rhashtable_destroy(&tc_info->flow_table);
1695         rhashtable_destroy(&tc_info->l2_table);
1696         rhashtable_destroy(&tc_info->decap_l2_table);
1697         rhashtable_destroy(&tc_info->decap_table);
1698         rhashtable_destroy(&tc_info->encap_table);
1699         kfree(tc_info);
1700         bp->tc_info = NULL;
1701 }