/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_vlan.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

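/* Poll interval for the flower stats worker: hardware hit counters are
 * read every half second (HZ jiffies == one second).
 */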
#define STATS_CHECK_PERIOD (HZ / 2)

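/* Map each supported TC pedit field to the size and byte offset of the
 * corresponding member in struct ch_filter_specification, so that
 * offload_pedit() can copy rewritten values straight into the filter
 * spec (see the PEDIT_FIELDS() macro in cxgb4_tc_flower.h).
 */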
static struct ch_tc_pedit_fields pedits[] = {
	PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
	PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
	PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
	PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
	PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
	PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
	PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
	PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
	PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
	PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
	PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
	PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
	PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
	PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
};

static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
	struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (new)
		spin_lock_init(&new->lock);
	return new;
}

/* Must be called with either RTNL or rcu_read_lock */
static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
						   unsigned long flower_cookie)
{
	return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie,
				      adap->flower_ht_params);
}

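/* Translate the flower match keys (control, basic, IPv4/IPv6 addresses,
 * L4 ports, tos, tunnel key id, vlan) from @cls into the hardware
 * filter specification @fs. Assumes cxgb4_validate_flow_match() has
 * already accepted the rule.
 */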
static void cxgb4_process_flow_match(struct net_device *dev,
				     struct flow_cls_offload *cls,
				     struct ch_filter_specification *fs)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	u16 addr_type = 0;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;
		u16 ethtype_key, ethtype_mask;

		flow_rule_match_basic(rule, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);

		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}

		if (ethtype_key == ETH_P_IPV6)
			fs->type = 1;

		fs->val.ethtype = ethtype_key;
		fs->mask.ethtype = ethtype_mask;
		fs->val.proto = match.key->ip_proto;
		fs->mask.proto = match.mask->ip_proto;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		fs->type = 0;
		memcpy(&fs->val.lip[0], &match.key->dst, sizeof(match.key->dst));
		memcpy(&fs->val.fip[0], &match.key->src, sizeof(match.key->src));
		memcpy(&fs->mask.lip[0], &match.mask->dst, sizeof(match.mask->dst));
		memcpy(&fs->mask.fip[0], &match.mask->src, sizeof(match.mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], &match.key->dst, sizeof(match.key->dst));
		memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		fs->type = 1;
		memcpy(&fs->val.lip[0], match.key->dst.s6_addr,
		       sizeof(match.key->dst));
		memcpy(&fs->val.fip[0], match.key->src.s6_addr,
		       sizeof(match.key->src));
		memcpy(&fs->mask.lip[0], match.mask->dst.s6_addr,
		       sizeof(match.mask->dst));
		memcpy(&fs->mask.fip[0], match.mask->src.s6_addr,
		       sizeof(match.mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], match.key->dst.s6_addr,
		       sizeof(match.key->dst));
		memcpy(&fs->nat_fip[0], match.key->src.s6_addr,
		       sizeof(match.key->src));
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		/* The filter spec keeps L4 ports in CPU byte order, so
		 * convert from the big-endian values TC hands us.
		 */
		fs->val.lport = be16_to_cpu(match.key->dst);
		fs->mask.lport = be16_to_cpu(match.mask->dst);
		fs->val.fport = be16_to_cpu(match.key->src);
		fs->mask.fport = be16_to_cpu(match.mask->src);

		/* also initialize nat_lport/fport to same values */
		fs->nat_lport = be16_to_cpu(match.key->dst);
		fs->nat_fport = be16_to_cpu(match.key->src);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		fs->val.tos = match.key->tos;
		fs->mask.tos = match.mask->tos;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;

		flow_rule_match_enc_keyid(rule, &match);
		fs->val.vni = be32_to_cpu(match.key->keyid);
		fs->mask.vni = be32_to_cpu(match.mask->keyid);
		if (fs->mask.vni) {
			fs->val.encap_vld = 1;
			fs->mask.encap_vld = 1;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;
		u16 vlan_tci, vlan_tci_mask;

		flow_rule_match_vlan(rule, &match);
		vlan_tci = match.key->vlan_id | (match.key->vlan_priority <<
						 VLAN_PRIO_SHIFT);
		vlan_tci_mask = match.mask->vlan_id | (match.mask->vlan_priority <<
						       VLAN_PRIO_SHIFT);
		fs->val.ivlan = vlan_tci;
		fs->mask.ivlan = vlan_tci_mask;

		fs->val.ivlan_vld = 1;
		fs->mask.ivlan_vld = 1;

		/* Chelsio adapters use the ivlan_vld bit to match VLAN
		 * (802.1Q) packets. When a VLAN tag is present, the
		 * ethtype field is matched against the ethtype of the
		 * inner header, i.e. the header following the VLAN
		 * header. So, if TC supplies ethtype 802.1Q for a VLAN
		 * packet, rely on ivlan_vld and clear the ethtype;
		 * otherwise, hardware would try to match the supplied
		 * ethtype against the inner header's ethtype.
		 */
		if (fs->val.ethtype == ETH_P_8021Q) {
			fs->val.ethtype = 0;
			fs->mask.ethtype = 0;
		}
	}

	/* Match only packets coming from the ingress port where this
	 * filter will be created.
	 */
	fs->val.iport = netdev2pinfo(dev)->port_id;
	fs->mask.iport = ~0;
}

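/* Reject rules using match keys the hardware cannot offload. Returns 0
 * if every key used by the rule is supported, negative errno otherwise.
 */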
static int cxgb4_validate_flow_match(struct net_device *dev,
				     struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 ethtype_mask = 0;
	u16 ethtype_key = 0;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IP))) {
		netdev_warn(dev, "Unsupported key used: 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		u16 eth_ip_type = ethtype_key & ethtype_mask;
		struct flow_match_ip match;

		if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
			netdev_err(dev, "IP key supported only with IPv4/v6\n");
			return -EINVAL;
		}

		flow_rule_match_ip(rule, &match);
		if (match.mask->ttl) {
			netdev_warn(dev, "ttl match unsupported for offload\n");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

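/* Look up @field in the pedits[] table and copy the rewrite value (the
 * bits of @val selected by ~@mask) into the matching member of the
 * filter specification.
 */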
static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
			  u8 field)
{
	u32 set_val = val & ~mask;
	u32 offset = 0;
	u8 size = 1;
	int i;

	for (i = 0; i < ARRAY_SIZE(pedits); i++) {
		if (pedits[i].field == field) {
			offset = pedits[i].offset;
			size = pedits[i].size;
			break;
		}
	}
	memcpy((u8 *)fs + offset, &set_val, size);
}

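/* Apply one pedit (packet mangle) action to the filter specification:
 * decode the header type and offset, stash the rewrite value via
 * offload_pedit(), and flag MAC rewrite or NAT as appropriate.
 */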
static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
				u32 mask, u32 offset, u8 htype)
{
	switch (htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
			fs->newdmac = 1;
			offload_pedit(fs, val, mask, ETH_DMAC_31_0);
			break;
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
			if (~mask & PEDIT_ETH_DMAC_MASK)
				offload_pedit(fs, val, mask, ETH_DMAC_47_32);
			else
				offload_pedit(fs, val >> 16, mask >> 16,
					      ETH_SMAC_15_0);
			break;
		case PEDIT_ETH_SMAC_47_16:
			fs->newsmac = 1;
			offload_pedit(fs, val, mask, ETH_SMAC_47_16);
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
			offload_pedit(fs, val, mask, IP4_SRC);
			break;
		case PEDIT_IP4_DST:
			offload_pedit(fs, val, mask, IP4_DST);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
			offload_pedit(fs, val, mask, IP6_SRC_31_0);
			break;
		case PEDIT_IP6_SRC_63_32:
			offload_pedit(fs, val, mask, IP6_SRC_63_32);
			break;
		case PEDIT_IP6_SRC_95_64:
			offload_pedit(fs, val, mask, IP6_SRC_95_64);
			break;
		case PEDIT_IP6_SRC_127_96:
			offload_pedit(fs, val, mask, IP6_SRC_127_96);
			break;
		case PEDIT_IP6_DST_31_0:
			offload_pedit(fs, val, mask, IP6_DST_31_0);
			break;
		case PEDIT_IP6_DST_63_32:
			offload_pedit(fs, val, mask, IP6_DST_63_32);
			break;
		case PEDIT_IP6_DST_95_64:
			offload_pedit(fs, val, mask, IP6_DST_95_64);
			break;
		case PEDIT_IP6_DST_127_96:
			offload_pedit(fs, val, mask, IP6_DST_127_96);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				offload_pedit(fs, cpu_to_be32(val) >> 16,
					      cpu_to_be32(mask) >> 16,
					      TCP_SPORT);
			else
				offload_pedit(fs, cpu_to_be32(val),
					      cpu_to_be32(mask), TCP_DPORT);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				offload_pedit(fs, cpu_to_be32(val) >> 16,
					      cpu_to_be32(mask) >> 16,
					      UDP_SPORT);
			else
				offload_pedit(fs, cpu_to_be32(val),
					      cpu_to_be32(mask), UDP_DPORT);
		}
		fs->nat_mode = NAT_MODE_ALL;
	}
}

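/* Walk the rule's actions and encode them in @fs: pass/drop, switch to
 * an egress port, vlan pop/push/rewrite, and header rewrites (pedit).
 */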
void cxgb4_process_flow_actions(struct net_device *in,
				struct flow_action *actions,
				struct ch_filter_specification *fs)
{
	struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, actions) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			fs->action = FILTER_PASS;
			break;
		case FLOW_ACTION_DROP:
			fs->action = FILTER_DROP;
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out = act->dev;
			struct port_info *pi = netdev_priv(out);

			fs->action = FILTER_SWITCH;
			fs->eport = pi->port_id;
			}
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE: {
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;
			u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;

			switch (act->id) {
			case FLOW_ACTION_VLAN_POP:
				fs->newvlan |= VLAN_REMOVE;
				break;
			case FLOW_ACTION_VLAN_PUSH:
				fs->newvlan |= VLAN_INSERT;
				fs->vlan = vlan_tci;
				break;
			case FLOW_ACTION_VLAN_MANGLE:
				fs->newvlan |= VLAN_REWRITE;
				fs->vlan = vlan_tci;
				break;
			default:
				break;
			}
			}
			break;
		case FLOW_ACTION_MANGLE: {
			u32 mask, val, offset;
			u8 htype;

			htype = act->mangle.htype;
			mask = act->mangle.mask;
			val = act->mangle.val;
			offset = act->mangle.offset;

			process_pedit_field(fs, val, mask, offset, htype);
			}
			break;
		default:
			break;
		}
	}
}

static bool valid_l4_mask(u32 mask)
{
	u16 hi, lo;

	/* Either the upper 16-bits (SPORT) OR the lower
	 * 16-bits (DPORT) can be set, but NOT BOTH.
	 */
	hi = (mask >> 16) & 0xFFFF;
	lo = mask & 0xFFFF;

	return !(hi && lo);
}

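/* Check that a single pedit action touches only fields the hardware can
 * rewrite, at the offsets the pedits[] table knows about.
 */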
static bool valid_pedit_action(struct net_device *dev,
			       const struct flow_action_entry *act)
{
	u32 mask, offset;
	u8 htype;

	htype = act->mangle.htype;
	mask = act->mangle.mask;
	offset = act->mangle.offset;

	switch (htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
		case PEDIT_ETH_SMAC_47_16:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
		case PEDIT_IP4_DST:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
		case PEDIT_IP6_SRC_63_32:
		case PEDIT_IP6_SRC_95_64:
		case PEDIT_IP6_SRC_127_96:
		case PEDIT_IP6_DST_31_0:
		case PEDIT_IP6_DST_63_32:
		case PEDIT_IP6_DST_95_64:
		case PEDIT_IP6_DST_127_96:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (!valid_l4_mask(~mask)) {
				netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
					   __func__);
				return false;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (!valid_l4_mask(~mask)) {
				netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
					   __func__);
				return false;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	default:
		netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
		return false;
	}
	return true;
}

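/* Ensure all requested actions can be offloaded: redirects must target
 * a port on this adapter, vlan push/rewrite must be 802.1Q, pedit
 * fields must be supported, and pedit/vlan rewrites additionally
 * require an egress redirect.
 */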
int cxgb4_validate_flow_actions(struct net_device *dev,
				struct flow_action *actions,
				struct netlink_ext_ack *extack)
{
	struct flow_action_entry *act;
	bool act_redir = false;
	bool act_pedit = false;
	bool act_vlan = false;
	int i;

	if (!flow_action_basic_hw_stats_check(actions, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, actions) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
		case FLOW_ACTION_DROP:
			/* Do nothing */
			break;
		case FLOW_ACTION_REDIRECT: {
			struct adapter *adap = netdev2adap(dev);
			struct net_device *n_dev, *target_dev;
			unsigned int i;
			bool found = false;

			target_dev = act->dev;
			for_each_port(adap, i) {
				n_dev = adap->port[i];
				if (target_dev == n_dev) {
					found = true;
					break;
				}
			}

			/* If interface doesn't belong to our hw, then
			 * the provided output port is not valid
			 */
			if (!found) {
				netdev_err(dev, "%s: Out port invalid\n",
					   __func__);
				return -EINVAL;
			}
			act_redir = true;
			}
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);

			switch (act->id) {
			case FLOW_ACTION_VLAN_POP:
				break;
			case FLOW_ACTION_VLAN_PUSH:
			case FLOW_ACTION_VLAN_MANGLE:
				if (proto != ETH_P_8021Q) {
					netdev_err(dev, "%s: Unsupported vlan proto\n",
						   __func__);
					return -EOPNOTSUPP;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported vlan action\n",
					   __func__);
				return -EOPNOTSUPP;
			}
			act_vlan = true;
			}
			break;
		case FLOW_ACTION_MANGLE: {
			bool pedit_valid = valid_pedit_action(dev, act);

			if (!pedit_valid)
				return -EOPNOTSUPP;
			act_pedit = true;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported action\n", __func__);
			return -EOPNOTSUPP;
		}
	}

	if ((act_pedit || act_vlan) && !act_redir) {
		netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
			   __func__);
		return -EINVAL;
	}

	return 0;
}

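/* Track the highest TC priority in use among rules placed in the HASH
 * filter region, so that priority ordering against LE-TCAM rules can be
 * checked when new rules are inserted (see cxgb4_get_free_ftid()).
 */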
static void cxgb4_tc_flower_hash_prio_add(struct adapter *adap, u32 tc_prio)
{
	spin_lock_bh(&adap->tids.ftid_lock);
	if (adap->tids.tc_hash_tids_max_prio < tc_prio)
		adap->tids.tc_hash_tids_max_prio = tc_prio;
	spin_unlock_bh(&adap->tids.ftid_lock);
}

static void cxgb4_tc_flower_hash_prio_del(struct adapter *adap, u32 tc_prio)
{
	struct tid_info *t = &adap->tids;
	struct ch_tc_flower_entry *fe;
	struct rhashtable_iter iter;
	u32 found = 0;

	spin_lock_bh(&t->ftid_lock);
	/* Bail if the current rule is not the one with the max
	 * prio.
	 */
	if (t->tc_hash_tids_max_prio != tc_prio)
		goto out_unlock;

	/* Search for the next rule having the same or next lower
	 * max prio.
	 */
	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		fe = rhashtable_walk_next(&iter);
		while (!IS_ERR_OR_NULL(fe)) {
			if (fe->fs.hash &&
			    fe->fs.tc_prio <= t->tc_hash_tids_max_prio) {
				t->tc_hash_tids_max_prio = fe->fs.tc_prio;
				found++;

				/* Bail if we found another rule
				 * having the same prio as the
				 * current max one.
				 */
				if (fe->fs.tc_prio == tc_prio)
					break;
			}

			fe = rhashtable_walk_next(&iter);
		}

		rhashtable_walk_stop(&iter);
	} while (fe == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	if (!found)
		t->tc_hash_tids_max_prio = 0;

out_unlock:
	spin_unlock_bh(&t->ftid_lock);
}

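/* Offload a new flower rule. A rule like the following (illustrative
 * only; the interface name and addresses are hypothetical) ends up here
 * via the ndo_setup_tc() path:
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 parent ffff: protocol ip \
 *       flower ip_proto tcp src_ip 192.0.2.1 dst_port 80 \
 *       action drop
 *
 * The match and actions are validated, translated into a hardware
 * filter specification, and programmed into either the HASH or the
 * LE-TCAM filter region.
 */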
int cxgb4_tc_flower_replace(struct net_device *dev,
			    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	struct ch_filter_specification *fs;
	struct filter_ctx ctx;
	u8 inet_family;
	int fidx, ret;

	if (cxgb4_validate_flow_actions(dev, &rule->action, extack))
		return -EOPNOTSUPP;

	if (cxgb4_validate_flow_match(dev, cls))
		return -EOPNOTSUPP;

	ch_flower = allocate_flower_entry();
	if (!ch_flower) {
		netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
		return -ENOMEM;
	}

	fs = &ch_flower->fs;
	fs->hitcnts = 1;
	cxgb4_process_flow_match(dev, cls, fs);
	cxgb4_process_flow_actions(dev, &rule->action, fs);

	fs->hash = is_filter_exact_match(adap, fs);
	inet_family = fs->type ? PF_INET6 : PF_INET;

	/* Get a free filter entry TID, where we can insert this new
	 * rule. Only insert rule if its prio doesn't conflict with
	 * existing rules.
	 */
	fidx = cxgb4_get_free_ftid(dev, inet_family, fs->hash,
				   cls->common.prio);
	if (fidx < 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LE-TCAM index available");
		ret = -ENOMEM;
		goto free_entry;
	}

	if (fidx < adap->tids.nhpftids) {
		fs->prio = 1;
		fs->hash = 0;
	}

	/* If the rule can be inserted into HASH region, then ignore
	 * the index to normal FILTER region.
	 */
	if (fs->hash)
		fidx = 0;

	fs->tc_prio = cls->common.prio;
	fs->tc_cookie = cls->cookie;

	init_completion(&ctx.completion);
	ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		goto free_entry;
	}

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret) {
		ret = -ETIMEDOUT;
		goto free_entry;
	}

	ret = ctx.result;
	/* Check if hw returned error for filter creation */
	if (ret)
		goto free_entry;

	ch_flower->tc_flower_cookie = cls->cookie;
	ch_flower->filter_id = ctx.tid;
	ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret)
		goto del_filter;

	if (fs->hash)
		cxgb4_tc_flower_hash_prio_add(adap, cls->common.prio);

	return 0;

del_filter:
	cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);

free_entry:
	kfree(ch_flower);
	return ret;
}

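/* Remove the hardware filter for a previously offloaded rule, looked up
 * by its TC cookie, and drop the rule from the driver's rhashtable.
 */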
int cxgb4_tc_flower_destroy(struct net_device *dev,
			    struct flow_cls_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	u32 tc_prio;
	bool hash;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower)
		return -ENOENT;

	hash = ch_flower->fs.hash;
	tc_prio = ch_flower->fs.tc_prio;

	ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
	if (ret)
		goto err;

	ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret) {
		netdev_err(dev, "Flow remove from rhashtable failed\n");
		goto err;
	}
	kfree_rcu(ch_flower, rcu);

	if (hash)
		cxgb4_tc_flower_hash_prio_del(adap, tc_prio);

err:
	return ret;
}

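/* Periodic worker: walk all offloaded rules, refresh last_used from the
 * hardware packet counters, then re-arm the poll timer.
 */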
static void ch_flower_stats_handler(struct work_struct *work)
{
	struct adapter *adap = container_of(work, struct adapter,
					    flower_stats_work);
	struct ch_tc_flower_entry *flower_entry;
	struct ch_tc_flower_stats *ofld_stats;
	struct rhashtable_iter iter;
	u64 packets;
	u64 bytes;
	int ret;

	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((flower_entry = rhashtable_walk_next(&iter)) &&
		       !IS_ERR(flower_entry)) {
			ret = cxgb4_get_filter_counters(adap->port[0],
							flower_entry->filter_id,
							&packets, &bytes,
							flower_entry->fs.hash);
			if (!ret) {
				spin_lock(&flower_entry->lock);
				ofld_stats = &flower_entry->stats;

				if (ofld_stats->prev_packet_count != packets) {
					ofld_stats->prev_packet_count = packets;
					ofld_stats->last_used = jiffies;
				}
				spin_unlock(&flower_entry->lock);
			}
		}

		rhashtable_walk_stop(&iter);

	} while (flower_entry == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

static void ch_flower_stats_cb(struct timer_list *t)
{
	struct adapter *adap = from_timer(adap, t, flower_stats_timer);

	schedule_work(&adap->flower_stats_work);
}

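/* Report byte/packet deltas for a rule back to TC. Deltas are computed
 * against the counts cached from the previous query.
 */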
int cxgb4_tc_flower_stats(struct net_device *dev,
			  struct flow_cls_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_stats *ofld_stats;
	struct ch_tc_flower_entry *ch_flower;
	u64 packets;
	u64 bytes;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower) {
		ret = -ENOENT;
		goto err;
	}

	ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
					&packets, &bytes,
					ch_flower->fs.hash);
	if (ret < 0)
		goto err;

	spin_lock_bh(&ch_flower->lock);
	ofld_stats = &ch_flower->stats;
	if (ofld_stats->packet_count != packets) {
		if (ofld_stats->prev_packet_count != packets)
			ofld_stats->last_used = jiffies;
		flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
				  packets - ofld_stats->packet_count,
				  ofld_stats->last_used,
				  FLOW_ACTION_HW_STATS_IMMEDIATE);

		ofld_stats->packet_count = packets;
		ofld_stats->byte_count = bytes;
		ofld_stats->prev_packet_count = packets;
	}
	spin_unlock_bh(&ch_flower->lock);
	return 0;

err:
	return ret;
}

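/* Offloaded rules are hashed by their TC cookie, which is what the
 * stack hands back in later destroy/stats requests.
 */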
static const struct rhashtable_params cxgb4_tc_flower_ht_params = {
	.nelem_hint = 384,
	.head_offset = offsetof(struct ch_tc_flower_entry, node),
	.key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie),
	.key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie),
	.max_size = 524288,
	.min_size = 512,
	.automatic_shrinking = true
};

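/* Set up flower offload state for an adapter: the cookie-to-rule
 * rhashtable and the periodic stats poll (see STATS_CHECK_PERIOD).
 */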
int cxgb4_init_tc_flower(struct adapter *adap)
{
	int ret;

	if (adap->tc_flower_initialized)
		return -EEXIST;

	adap->flower_ht_params = cxgb4_tc_flower_ht_params;
	ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
	if (ret)
		return ret;

	INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
	timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
	adap->tc_flower_initialized = true;
	return 0;
}

void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
	if (!adap->tc_flower_initialized)
		return;

	if (adap->flower_stats_timer.function)
		del_timer_sync(&adap->flower_stats_timer);
	cancel_work_sync(&adap->flower_stats_work);
	rhashtable_destroy(&adap->flower_tbl);
	adap->tc_flower_initialized = false;
}