/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */

#include <net/vxlan.h>
#include <net/gre.h>
#include "lib/vxlan.h"
#include "en/tc_tun.h"

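/* Resolve the netdevice the encapsulated packet is routed through
 * (route_dev) and the netdevice the offloaded flow egresses on
 * (out_dev). Traffic leaving through a device that is not on the same
 * HW e-switch, or through a bond/LAG device, is sent via the uplink
 * representor; out_dev must end up being the uplink representor or
 * the flow cannot be offloaded.
 */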
static int get_route_and_out_devs(struct mlx5e_priv *priv,
                                  struct net_device *dev,
                                  struct net_device **route_dev,
                                  struct net_device **out_dev)
{
        struct net_device *uplink_dev, *uplink_upper, *real_dev;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        bool dst_is_lag_dev;

        real_dev = is_vlan_dev(dev) ? vlan_dev_real_dev(dev) : dev;
        uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
        uplink_upper = netdev_master_upper_dev_get(uplink_dev);
        dst_is_lag_dev = (uplink_upper &&
                          netif_is_lag_master(uplink_upper) &&
                          real_dev == uplink_upper &&
                          mlx5_lag_is_sriov(priv->mdev));

        /* if the egress device isn't on the same HW e-switch or
         * it's a LAG device, use the uplink
         */
        if (!netdev_port_same_parent_id(priv->netdev, real_dev) ||
            dst_is_lag_dev) {
                *route_dev = dev;
                *out_dev = uplink_dev;
        } else {
                *route_dev = dev;
                if (is_vlan_dev(*route_dev))
                        *out_dev = uplink_dev;
                else if (mlx5e_eswitch_rep(dev))
                        *out_dev = *route_dev;
                else
                        return -EOPNOTSUPP;
        }

        if (!(mlx5e_eswitch_rep(*out_dev) &&
              mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
                return -EOPNOTSUPP;

        return 0;
}

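/* Perform a FIB lookup for the tunnel destination and resolve the
 * egress devices and the neighbour that supplies the outer MAC
 * addresses. With multipath LAG the lookup is forced through the
 * uplink netdevice. *out_ttl is taken from the route unless the
 * caller already set it; the returned neighbour holds a reference
 * the caller must release.
 */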
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
                                   struct net_device *mirred_dev,
                                   struct net_device **out_dev,
                                   struct net_device **route_dev,
                                   struct flowi4 *fl4,
                                   struct neighbour **out_n,
                                   u8 *out_ttl)
{
        struct rtable *rt;
        struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
        struct mlx5_core_dev *mdev = priv->mdev;
        struct net_device *uplink_dev;
        int ret;

        if (mlx5_lag_is_multipath(mdev)) {
                struct mlx5_eswitch *esw = mdev->priv.eswitch;

                uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
                fl4->flowi4_oif = uplink_dev->ifindex;
        }

        rt = ip_route_output_key(dev_net(mirred_dev), fl4);
        ret = PTR_ERR_OR_ZERO(rt);
        if (ret)
                return ret;

        if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
                ip_rt_put(rt);
                return -ENETUNREACH;
        }
#else
        return -EOPNOTSUPP;
#endif

        ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev);
        if (ret < 0) {
                ip_rt_put(rt);
                return ret;
        }

        if (!(*out_ttl))
                *out_ttl = ip4_dst_hoplimit(&rt->dst);
        n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
        ip_rt_put(rt);
        if (!n)
                return -ENOMEM;

        *out_n = n;
        return 0;
}

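/* Return the rtnl link kind of a netdevice ("vxlan", "gretap", ...),
 * or "unknown" for devices without link ops.
 */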
static const char *mlx5e_netdev_kind(struct net_device *dev)
{
        if (dev->rtnl_link_ops)
                return dev->rtnl_link_ops->kind;
        else
                return "unknown";
}

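/* IPv6 counterpart of mlx5e_route_lookup_ipv4(): look up the tunnel
 * destination, resolve route_dev/out_dev, fill in *out_ttl if unset
 * and return a referenced neighbour for the outer MAC addresses.
 */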
static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
                                   struct net_device *mirred_dev,
                                   struct net_device **out_dev,
                                   struct net_device **route_dev,
                                   struct flowi6 *fl6,
                                   struct neighbour **out_n,
                                   u8 *out_ttl)
{
        struct neighbour *n = NULL;
        struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
        int ret;

        ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
                                         fl6);
        if (ret < 0)
                return ret;

        if (!(*out_ttl))
                *out_ttl = ip6_dst_hoplimit(dst);

        ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
        if (ret < 0) {
                dst_release(dst);
                return ret;
        }
#else
        return -EOPNOTSUPP;
#endif

        n = dst_neigh_lookup(dst, &fl6->daddr);
        dst_release(dst);
        if (!n)
                return -ENOMEM;

        *out_n = n;
        return 0;
}

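/* Write the outer UDP destination port and the VXLAN header (flags +
 * VNI) into buf, which points at the start of the outer UDP header.
 * Only the fields the driver must supply are set; the rest of the
 * encap buffer stays zeroed from its kzalloc().
 */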
static int mlx5e_gen_vxlan_header(char buf[], struct ip_tunnel_key *tun_key)
{
        __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
        struct udphdr *udp = (struct udphdr *)(buf);
        struct vxlanhdr *vxh = (struct vxlanhdr *)
                               ((char *)udp + sizeof(struct udphdr));

        udp->dest = tun_key->tp_dst;
        vxh->vx_flags = VXLAN_HF_VNI;
        vxh->vx_vni = vxlan_vni_field(tun_id);

        return 0;
}

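/* Build the GRE base header in buf. Checksum and sequence-number
 * generation are not supported by the HW, so such tunnels are
 * rejected; the GRE key, when present, occupies the last four bytes
 * of the computed header length.
 */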
static int mlx5e_gen_gre_header(char buf[], struct ip_tunnel_key *tun_key)
{
        __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
        int hdr_len;
        struct gre_base_hdr *greh = (struct gre_base_hdr *)(buf);

        /* the HW does not calculate GRE csum or sequences */
        if (tun_key->tun_flags & (TUNNEL_CSUM | TUNNEL_SEQ))
                return -EOPNOTSUPP;

        greh->protocol = htons(ETH_P_TEB);

        /* GRE key */
        hdr_len = gre_calc_hlen(tun_key->tun_flags);
        greh->flags = gre_tnl_flags_to_gre_flags(tun_key->tun_flags);
        if (tun_key->tun_flags & TUNNEL_KEY) {
                __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);

                *ptr = tun_id;
        }

        return 0;
}

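/* Generate the tunnel (L4) part of the encap header at buf and set
 * the outer IP protocol accordingly: UDP for VXLAN, GRE for gretap.
 */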
static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto,
                                      struct mlx5e_encap_entry *e)
{
        int err = 0;
        struct ip_tunnel_key *key = &e->tun_info.key;

        if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
                *ip_proto = IPPROTO_UDP;
                err = mlx5e_gen_vxlan_header(buf, key);
        } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
                *ip_proto = IPPROTO_GRE;
                err = mlx5e_gen_gre_header(buf, key);
        } else {
                pr_warn("mlx5: Cannot generate tunnel header for tunnel type (%d)\n",
                        e->tunnel_type);
                err = -EOPNOTSUPP;
        }

        return err;
}

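/* Write the outer Ethernet header (plus a VLAN tag when the routing
 * device is a VLAN device) and return a pointer to where the outer
 * IP header should start.
 */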
static char *gen_eth_tnl_hdr(char *buf, struct net_device *dev,
                             struct mlx5e_encap_entry *e,
                             u16 proto)
{
        struct ethhdr *eth = (struct ethhdr *)buf;
        char *ip;

        ether_addr_copy(eth->h_dest, e->h_dest);
        ether_addr_copy(eth->h_source, dev->dev_addr);
        if (is_vlan_dev(dev)) {
                struct vlan_hdr *vlan = (struct vlan_hdr *)
                                        ((char *)eth + ETH_HLEN);
                ip = (char *)vlan + VLAN_HLEN;
                eth->h_proto = vlan_dev_vlan_proto(dev);
                vlan->h_vlan_TCI = htons(vlan_dev_vlan_id(dev));
                vlan->h_vlan_encapsulated_proto = htons(proto);
        } else {
                eth->h_proto = htons(proto);
                ip = (char *)eth + ETH_HLEN;
        }

        return ip;
}

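/* Build the complete IPv4 encap header (Ethernet, optional VLAN,
 * IPv4 and the tunnel header), attach the encap entry to the
 * resolved neighbour and, if the neighbour is already valid,
 * allocate the HW packet-reformat context. Otherwise the entry is
 * completed later from the neigh update event.
 */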
int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
                                    struct net_device *mirred_dev,
                                    struct mlx5e_encap_entry *e)
{
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
        struct ip_tunnel_key *tun_key = &e->tun_info.key;
        struct net_device *out_dev, *route_dev;
        struct neighbour *n = NULL;
        struct flowi4 fl4 = {};
        int ipv4_encap_size;
        char *encap_header;
        u8 nud_state, ttl;
        struct iphdr *ip;
        int err;

        /* add the IP fields */
        fl4.flowi4_tos = tun_key->tos;
        fl4.daddr = tun_key->u.ipv4.dst;
        fl4.saddr = tun_key->u.ipv4.src;
        ttl = tun_key->ttl;

        err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, &route_dev,
                                      &fl4, &n, &ttl);
        if (err)
                return err;

        ipv4_encap_size =
                (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
                sizeof(struct iphdr) +
                e->tunnel_hlen;

        if (max_encap_size < ipv4_encap_size) {
                mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
                               ipv4_encap_size, max_encap_size);
                err = -EOPNOTSUPP;
                goto out;
        }

        encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
        if (!encap_header) {
                err = -ENOMEM;
                goto out;
        }

        /* used by mlx5e_detach_encap to lookup a neigh hash table
         * entry when a user deletes a rule
         */
        e->m_neigh.dev = n->dev;
        e->m_neigh.family = n->ops->family;
        memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
        e->out_dev = out_dev;
        e->route_dev = route_dev;

        /* It's important to add the neigh to the hash table before checking
         * its validity state, so that if a notification arrives while the
         * neigh changes its validity state, we find the relevant neigh in
         * the hash.
         */
        err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
        if (err)
                goto free_encap;

        read_lock_bh(&n->lock);
        nud_state = n->nud_state;
        ether_addr_copy(e->h_dest, n->ha);
        read_unlock_bh(&n->lock);

        /* add ethernet header */
        ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
                                             ETH_P_IP);

        /* add ip header */
        ip->tos = tun_key->tos;
        ip->version = 0x4;
        ip->ihl = 0x5;
        ip->ttl = ttl;
        ip->daddr = fl4.daddr;
        ip->saddr = fl4.saddr;

        /* add tunneling protocol header */
        err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr),
                                         &ip->protocol, e);
        if (err)
                goto destroy_neigh_entry;

        e->encap_size = ipv4_encap_size;
        e->encap_header = encap_header;

        if (!(nud_state & NUD_VALID)) {
                neigh_event_send(n, NULL);
                /* the encap entry will be made valid on neigh update event
                 * and not used before that.
                 */
                goto out;
        }

        err = mlx5_packet_reformat_alloc(priv->mdev,
                                         e->reformat_type,
                                         ipv4_encap_size, encap_header,
                                         MLX5_FLOW_NAMESPACE_FDB,
                                         &e->encap_id);
        if (err)
                goto destroy_neigh_entry;

        e->flags |= MLX5_ENCAP_ENTRY_VALID;
        mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
        neigh_release(n);
        return err;

destroy_neigh_entry:
        mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
        kfree(encap_header);
out:
        if (n)
                neigh_release(n);
        return err;
}

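/* IPv6 counterpart of mlx5e_tc_tun_create_header_ipv4(): build the
 * encap header with an outer IPv6 header, attach the encap entry to
 * the neighbour and allocate the HW packet-reformat context once the
 * neighbour is valid.
 */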
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
                                    struct net_device *mirred_dev,
                                    struct mlx5e_encap_entry *e)
{
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
        struct ip_tunnel_key *tun_key = &e->tun_info.key;
        struct net_device *out_dev, *route_dev;
        struct neighbour *n = NULL;
        struct flowi6 fl6 = {};
        struct ipv6hdr *ip6h;
        int ipv6_encap_size;
        char *encap_header;
        u8 nud_state, ttl;
        int err;

        ttl = tun_key->ttl;

        fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
        fl6.daddr = tun_key->u.ipv6.dst;
        fl6.saddr = tun_key->u.ipv6.src;

        err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, &route_dev,
                                      &fl6, &n, &ttl);
        if (err)
                return err;

        ipv6_encap_size =
                (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
                sizeof(struct ipv6hdr) +
                e->tunnel_hlen;

        if (max_encap_size < ipv6_encap_size) {
                mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
                               ipv6_encap_size, max_encap_size);
                err = -EOPNOTSUPP;
                goto out;
        }

        encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
        if (!encap_header) {
                err = -ENOMEM;
                goto out;
        }

        /* used by mlx5e_detach_encap to lookup a neigh hash table
         * entry when a user deletes a rule
         */
        e->m_neigh.dev = n->dev;
        e->m_neigh.family = n->ops->family;
        memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
        e->out_dev = out_dev;
        e->route_dev = route_dev;

        /* It's important to add the neigh to the hash table before checking
         * its validity state, so that if a notification arrives while the
         * neigh changes its validity state, we find the relevant neigh in
         * the hash.
         */
        err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
        if (err)
                goto free_encap;

        read_lock_bh(&n->lock);
        nud_state = n->nud_state;
        ether_addr_copy(e->h_dest, n->ha);
        read_unlock_bh(&n->lock);

        /* add ethernet header */
        ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
                                                 ETH_P_IPV6);

        /* add ip header */
        ip6_flow_hdr(ip6h, tun_key->tos, 0);
        /* the HW fills up ipv6 payload len */
        ip6h->hop_limit   = ttl;
        ip6h->daddr       = fl6.daddr;
        ip6h->saddr       = fl6.saddr;

        /* add tunneling protocol header */
        err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr),
                                         &ip6h->nexthdr, e);
        if (err)
                goto destroy_neigh_entry;

        e->encap_size = ipv6_encap_size;
        e->encap_header = encap_header;

        if (!(nud_state & NUD_VALID)) {
                neigh_event_send(n, NULL);
                /* the encap entry will be made valid on neigh update event
                 * and not used before that.
                 */
                goto out;
        }

        err = mlx5_packet_reformat_alloc(priv->mdev,
                                         e->reformat_type,
                                         ipv6_encap_size, encap_header,
                                         MLX5_FLOW_NAMESPACE_FDB,
                                         &e->encap_id);
        if (err)
                goto destroy_neigh_entry;

        e->flags |= MLX5_ENCAP_ENTRY_VALID;
        mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
        neigh_release(n);
        return err;

destroy_neigh_entry:
        mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
        kfree(encap_header);
out:
        if (n)
                neigh_release(n);
        return err;
}

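/* Map a tunnel netdevice to the driver's tunnel type enum. */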
int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev)
{
        if (netif_is_vxlan(tunnel_dev))
                return MLX5E_TC_TUNNEL_TYPE_VXLAN;
        else if (netif_is_gretap(tunnel_dev) ||
                 netif_is_ip6gretap(tunnel_dev))
                return MLX5E_TC_TUNNEL_TYPE_GRETAP;
        else
                return MLX5E_TC_TUNNEL_TYPE_UNKNOWN;
}

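/* Check whether the device HW advertises encap/decap support for the
 * tunnel type of the given netdevice.
 */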
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
                                    struct net_device *netdev)
{
        int tunnel_type = mlx5e_tc_tun_get_type(netdev);

        if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN &&
            MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
                return true;
        else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP &&
                 MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap))
                return true;
        else
                return false;
}

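/* Initialize the encap entry attributes (HW reformat type and tunnel
 * header length) for the tunnel device. For VXLAN the UDP destination
 * port must already be registered with the HW.
 */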
int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
                                 struct mlx5e_priv *priv,
                                 struct mlx5e_encap_entry *e,
                                 struct netlink_ext_ack *extack)
{
        e->tunnel_type = mlx5e_tc_tun_get_type(tunnel_dev);

        if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
                int dst_port = be16_to_cpu(e->tun_info.key.tp_dst);

                if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "vxlan udp dport was not registered with the HW");
                        netdev_warn(priv->netdev,
                                    "%d isn't an offloaded vxlan udp dport\n",
                                    dst_port);
                        return -EOPNOTSUPP;
                }
                e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
                e->tunnel_hlen = VXLAN_HLEN;
        } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
                e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_NVGRE;
                e->tunnel_hlen = gre_calc_hlen(e->tun_info.key.tun_flags);
        } else {
                e->reformat_type = -1;
                e->tunnel_hlen = -1;
                return -EOPNOTSUPP;
        }
        return 0;
}

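/* Translate a TC flower VXLAN decap match into the mlx5 flow spec:
 * outer UDP ports and, when given, the VNI. An exact match on a UDP
 * destination port that is registered as a VXLAN port is mandatory.
 */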
static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
                                    struct mlx5_flow_spec *spec,
                                    struct tc_cls_flower_offload *f,
                                    void *headers_c,
                                    void *headers_v)
{
        struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
        struct netlink_ext_ack *extack = f->common.extack;
        void *misc_c = MLX5_ADDR_OF(fte_match_param,
                                    spec->match_criteria,
                                    misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param,
                                    spec->match_value,
                                    misc_parameters);
        struct flow_match_ports enc_ports;

        /* Full udp dst port must be given */
        if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "VXLAN decap filter must include enc_dst_port condition");
                netdev_warn(priv->netdev,
                            "VXLAN decap filter must include enc_dst_port condition\n");
                return -EOPNOTSUPP;
        }

        flow_rule_match_enc_ports(rule, &enc_ports);

        if (memchr_inv(&enc_ports.mask->dst, 0xff, sizeof(enc_ports.mask->dst))) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "VXLAN decap filter must include enc_dst_port condition");
                netdev_warn(priv->netdev,
                            "VXLAN decap filter must include enc_dst_port condition\n");
                return -EOPNOTSUPP;
        }

        /* udp dst port must be known as a VXLAN port */
        if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(enc_ports.key->dst))) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Matched UDP port is not registered as a VXLAN port");
                netdev_warn(priv->netdev,
                            "UDP port %d is not registered as a VXLAN port\n",
                            be16_to_cpu(enc_ports.key->dst));
                return -EOPNOTSUPP;
        }

        /* dst UDP port is valid here */
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

        MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
                 ntohs(enc_ports.mask->dst));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
                 ntohs(enc_ports.key->dst));

        MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
                 ntohs(enc_ports.mask->src));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
                 ntohs(enc_ports.key->src));

        /* match on VNI */
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_match_enc_keyid enc_keyid;

                flow_rule_match_enc_keyid(rule, &enc_keyid);

                MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
                         be32_to_cpu(enc_keyid.mask->keyid));
                MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
                         be32_to_cpu(enc_keyid.key->keyid));
        }
        return 0;
}

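/* Translate a TC flower GRE decap match into the mlx5 flow spec:
 * outer IP protocol, GRE protocol (TEB) and, when given, the GRE key.
 */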
static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
                                     struct mlx5_flow_spec *spec,
                                     struct tc_cls_flower_offload *f,
                                     void *outer_headers_c,
                                     void *outer_headers_v)
{
        void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    misc_parameters);
        struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);

        if (!MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap)) {
                NL_SET_ERR_MSG_MOD(f->common.extack,
                                   "GRE HW offloading is not supported");
                netdev_warn(priv->netdev, "GRE HW offloading is not supported\n");
                return -EOPNOTSUPP;
        }

        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
                 ip_protocol, IPPROTO_GRE);

        /* gre protocol */
        MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, gre_protocol);
        MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, ETH_P_TEB);

        /* gre key */
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_match_enc_keyid enc_keyid;

                flow_rule_match_enc_keyid(rule, &enc_keyid);
                MLX5_SET(fte_match_set_misc, misc_c,
                         gre_key.key, be32_to_cpu(enc_keyid.mask->keyid));
                MLX5_SET(fte_match_set_misc, misc_v,
                         gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
        }

        return 0;
}

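/* Dispatch decap match parsing by tunnel type and report the match
 * level the resulting rule requires (L4 for VXLAN, L3 for GRE).
 */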
int mlx5e_tc_tun_parse(struct net_device *filter_dev,
                       struct mlx5e_priv *priv,
                       struct mlx5_flow_spec *spec,
                       struct tc_cls_flower_offload *f,
                       void *headers_c,
                       void *headers_v, u8 *match_level)
{
        int tunnel_type;
        int err = 0;

        tunnel_type = mlx5e_tc_tun_get_type(filter_dev);
        if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
                *match_level = MLX5_MATCH_L4;
                err = mlx5e_tc_tun_parse_vxlan(priv, spec, f,
                                               headers_c, headers_v);
        } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
                *match_level = MLX5_MATCH_L3;
                err = mlx5e_tc_tun_parse_gretap(priv, spec, f,
                                                headers_c, headers_v);
        } else {
                netdev_warn(priv->netdev,
                            "decapsulation offload is not supported for %s (kind: \"%s\")\n",
                            netdev_name(filter_dev),
                            mlx5e_netdev_kind(filter_dev));

                return -EOPNOTSUPP;
        }
        return err;
}