1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
4 #include <linux/bitfield.h>
5 #include <net/pkt_cls.h>
/* Compile the metadata + VLAN TCI portion of the flower match.
 * Zeroes both @ext (exact-match key) and @msk (mask), records @key_type
 * as the key-layer bitmap in each, then - if @rule matches on VLAN -
 * encodes the VLAN-present flag, priority and VID into the big-endian
 * tci fields of key and mask.
 * NOTE(review): some lines (declarations, braces, a FIELD_PREP argument)
 * are not visible in this view; comments annotate only what is shown.
 */
11 nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
12 struct nfp_flower_meta_tci *msk,
13 struct flow_rule *rule, u8 key_type)
/* Start from a clean slate so unmatched fields are zero in key and mask. */
17 memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
18 memset(msk, 0, sizeof(struct nfp_flower_meta_tci));
20 /* Populate the metadata frame. */
21 ext->nfp_flow_key_layer = key_type;
/* The key layer is matched exactly, so the mask carries the same value. */
24 msk->nfp_flow_key_layer = key_type;
27 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
28 struct flow_match_vlan match;
30 flow_rule_match_vlan(rule, &match);
31 /* Populate the tci field. */
/* The present bit is always set alongside the priority/VID fields. */
32 tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
33 tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
34 match.key->vlan_priority) |
35 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
37 ext->tci = cpu_to_be16(tmp_tci);
/* Build the mask the same way from the rule's mask values. */
39 tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
40 tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
41 match.mask->vlan_priority) |
42 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
44 msk->tci = cpu_to_be16(tmp_tci);
/* Write the secondary key-layer bitmap (@key_ext) into the extended
 * metadata frame, converted to big-endian for the firmware.
 */
49 nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
51 frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
/* Compile the ingress-port word of the match.
 * For the mask version (@mask_version) the port is matched on all bits
 * (~0).  For tunnel flows the word encodes NFP_FL_PORT_TYPE_TUN plus the
 * tunnel type; otherwise the raw @cmsg_port id is used.  An invalid
 * ingress interface is reported through @extack.
 * NOTE(review): the branching between these cases is not fully visible
 * in this view - comments annotate the visible assignments only.
 */
55 nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
56 bool mask_version, enum nfp_flower_tun_type tun_type,
57 struct netlink_ext_ack *extack)
/* Mask version: require an exact match on every bit of in_port. */
60 frame->in_port = cpu_to_be32(~0);
/* Tunnel flow: in_port carries the tunnel-port flag and tunnel type. */
65 frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
68 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid ingress interface for match offload");
/* Default: plain control-message port id. */
71 frame->in_port = cpu_to_be32(cmsg_port);
/* Compile MAC-address and MPLS match fields into @ext/@msk.
 * Copies src/dst ethernet addresses when the rule matches on them, and
 * encodes the first MPLS label stack entry (label, TC, bottom-of-stack)
 * together with the NFP_FLOWER_MASK_MPLS_Q "MPLS present" bit.  Matching
 * more than one LSE is rejected via @extack.  A bare MPLS ethertype with
 * no LSE match sets only the Q bit in both key and mask.
 */
78 nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
79 struct nfp_flower_mac_mpls *msk, struct flow_rule *rule,
80 struct netlink_ext_ack *extack)
/* Zero key and mask so absent selectors become wildcards. */
82 memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
83 memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
85 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
86 struct flow_match_eth_addrs match;
88 flow_rule_match_eth_addrs(rule, &match);
89 /* Populate mac frame. */
90 ether_addr_copy(ext->mac_dst, &match.key->dst[0]);
91 ether_addr_copy(ext->mac_src, &match.key->src[0]);
92 ether_addr_copy(msk->mac_dst, &match.mask->dst[0]);
93 ether_addr_copy(msk->mac_src, &match.mask->src[0]);
96 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
97 struct flow_match_mpls match;
100 flow_rule_match_mpls(rule, &match);
102 /* Only support matching the first LSE */
103 if (match.mask->used_lses != 1) {
104 NL_SET_ERR_MSG_MOD(extack,
105 "unsupported offload: invalid LSE depth for MPLS match offload");
/* Pack label/TC/BoS of LSE 0 plus the "MPLS present" Q bit. */
109 t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
110 match.key->ls[0].mpls_label) |
111 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
112 match.key->ls[0].mpls_tc) |
113 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
114 match.key->ls[0].mpls_bos) |
115 NFP_FLOWER_MASK_MPLS_Q;
116 ext->mpls_lse = cpu_to_be32(t_mpls);
/* Same packing for the mask values. */
117 t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
118 match.mask->ls[0].mpls_label) |
119 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
120 match.mask->ls[0].mpls_tc) |
121 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
122 match.mask->ls[0].mpls_bos) |
123 NFP_FLOWER_MASK_MPLS_Q;
124 msk->mpls_lse = cpu_to_be32(t_mpls);
125 } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
126 /* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
127 * bit, which indicates an mpls ether type but without any
130 struct flow_match_basic match;
132 flow_rule_match_basic(rule, &match);
133 if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
134 match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
135 ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
136 msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
/* Compile the L4 source/destination port match into @ext/@msk.
 * Zeroes both frames, then copies the rule's port key and mask directly
 * (values are already big-endian in the flow match).
 */
144 nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
145 struct nfp_flower_tp_ports *msk,
146 struct flow_rule *rule)
148 memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
149 memset(msk, 0, sizeof(struct nfp_flower_tp_ports));
151 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
152 struct flow_match_ports match;
154 flow_rule_match_ports(rule, &match);
155 ext->port_src = match.key->src;
156 ext->port_dst = match.key->dst;
157 msk->port_src = match.mask->src;
158 msk->port_dst = match.mask->dst;
/* Compile the IP-version-independent extension fields shared by the
 * IPv4 and IPv6 match layers: L4 protocol, TOS/TTL, TCP flags and
 * fragmentation flags.  @ext/@msk are assumed already zeroed by the
 * caller (see nfp_flower_compile_ipv4/ipv6), so flags are OR-ed in.
 */
163 nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
164 struct nfp_flower_ip_ext *msk, struct flow_rule *rule)
166 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
167 struct flow_match_basic match;
169 flow_rule_match_basic(rule, &match);
170 ext->proto = match.key->ip_proto;
171 msk->proto = match.mask->ip_proto;
174 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
175 struct flow_match_ip match;
177 flow_rule_match_ip(rule, &match);
178 ext->tos = match.key->tos;
179 ext->ttl = match.key->ttl;
180 msk->tos = match.mask->tos;
181 msk->ttl = match.mask->ttl;
/* Translate TCP header flags into the NFP flag-bit encoding. */
184 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
185 u16 tcp_flags, tcp_flags_mask;
186 struct flow_match_tcp match;
188 flow_rule_match_tcp(rule, &match);
189 tcp_flags = be16_to_cpu(match.key->flags);
190 tcp_flags_mask = be16_to_cpu(match.mask->flags);
192 if (tcp_flags & TCPHDR_FIN)
193 ext->flags |= NFP_FL_TCP_FLAG_FIN;
194 if (tcp_flags_mask & TCPHDR_FIN)
195 msk->flags |= NFP_FL_TCP_FLAG_FIN;
197 if (tcp_flags & TCPHDR_SYN)
198 ext->flags |= NFP_FL_TCP_FLAG_SYN;
199 if (tcp_flags_mask & TCPHDR_SYN)
200 msk->flags |= NFP_FL_TCP_FLAG_SYN;
202 if (tcp_flags & TCPHDR_RST)
203 ext->flags |= NFP_FL_TCP_FLAG_RST;
204 if (tcp_flags_mask & TCPHDR_RST)
205 msk->flags |= NFP_FL_TCP_FLAG_RST;
207 if (tcp_flags & TCPHDR_PSH)
208 ext->flags |= NFP_FL_TCP_FLAG_PSH;
209 if (tcp_flags_mask & TCPHDR_PSH)
210 msk->flags |= NFP_FL_TCP_FLAG_PSH;
212 if (tcp_flags & TCPHDR_URG)
213 ext->flags |= NFP_FL_TCP_FLAG_URG;
214 if (tcp_flags_mask & TCPHDR_URG)
215 msk->flags |= NFP_FL_TCP_FLAG_URG;
/* Fragmentation state from the flow-control dissector. */
218 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
219 struct flow_match_control match;
221 flow_rule_match_control(rule, &match);
222 if (match.key->flags & FLOW_DIS_IS_FRAGMENT)
223 ext->flags |= NFP_FL_IP_FRAGMENTED;
224 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT)
225 msk->flags |= NFP_FL_IP_FRAGMENTED;
226 if (match.key->flags & FLOW_DIS_FIRST_FRAG)
227 ext->flags |= NFP_FL_IP_FRAG_FIRST;
228 if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
229 msk->flags |= NFP_FL_IP_FRAG_FIRST;
/* Compile the IPv4 match layer: zero @ext/@msk, copy src/dst addresses
 * when the rule matches on them, then fill the shared ip_ext fields.
 */
234 nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
235 struct nfp_flower_ipv4 *msk, struct flow_rule *rule)
237 struct flow_match_ipv4_addrs match;
239 memset(ext, 0, sizeof(struct nfp_flower_ipv4));
240 memset(msk, 0, sizeof(struct nfp_flower_ipv4));
242 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
243 flow_rule_match_ipv4_addrs(rule, &match);
244 ext->ipv4_src = match.key->src;
245 ext->ipv4_dst = match.key->dst;
246 msk->ipv4_src = match.mask->src;
247 msk->ipv4_dst = match.mask->dst;
/* proto, TOS/TTL, TCP and fragmentation flags live in ip_ext. */
250 nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
/* Compile the IPv6 match layer: zero @ext/@msk, copy src/dst addresses
 * when the rule matches on them, then fill the shared ip_ext fields.
 */
254 nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
255 struct nfp_flower_ipv6 *msk, struct flow_rule *rule)
257 memset(ext, 0, sizeof(struct nfp_flower_ipv6));
258 memset(msk, 0, sizeof(struct nfp_flower_ipv6));
260 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
261 struct flow_match_ipv6_addrs match;
263 flow_rule_match_ipv6_addrs(rule, &match);
264 ext->ipv6_src = match.key->src;
265 ext->ipv6_dst = match.key->dst;
266 msk->ipv6_src = match.mask->src;
267 msk->ipv6_dst = match.mask->dst;
/* proto, TOS/TTL, TCP and fragmentation flags live in ip_ext. */
270 nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
/* Copy the raw geneve tunnel-option bytes from the rule's encapsulation
 * options into the key (@ext) and mask (@msk) buffers.  Key and mask may
 * have different lengths; each copy uses its own len.
 * NOTE(review): the caller is expected to have sized @ext/@msk for the
 * option data - bounds are not checked here (verify against callers).
 */
274 nfp_flower_compile_geneve_opt(void *ext, void *msk, struct flow_rule *rule)
276 struct flow_match_enc_opts match;
278 flow_rule_match_enc_opts(rule, &match);
279 memcpy(ext, match.key->data, match.key->len);
280 memcpy(msk, match.mask->data, match.mask->len);
/* Copy the outer (encapsulation) IPv4 src/dst addresses into the tunnel
 * key and mask, when the rule matches on them.
 */
286 nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
287 struct nfp_flower_tun_ipv4 *msk,
288 struct flow_rule *rule)
290 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
291 struct flow_match_ipv4_addrs match;
293 flow_rule_match_enc_ipv4_addrs(rule, &match);
294 ext->src = match.key->src;
295 ext->dst = match.key->dst;
296 msk->src = match.mask->src;
297 msk->dst = match.mask->dst;
/* Copy the outer (encapsulation) IPv6 src/dst addresses into the tunnel
 * key and mask, when the rule matches on them.
 */
302 nfp_flower_compile_tun_ipv6_addrs(struct nfp_flower_tun_ipv6 *ext,
303 struct nfp_flower_tun_ipv6 *msk,
304 struct flow_rule *rule)
306 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
307 struct flow_match_ipv6_addrs match;
309 flow_rule_match_enc_ipv6_addrs(rule, &match);
310 ext->src = match.key->src;
311 ext->dst = match.key->dst;
312 msk->src = match.mask->src;
313 msk->dst = match.mask->dst;
/* Copy the outer-IP TOS and TTL of the encapsulation header into the
 * tunnel key and mask, when the rule matches on them.
 */
318 nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
319 struct nfp_flower_tun_ip_ext *msk,
320 struct flow_rule *rule)
322 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
323 struct flow_match_ip match;
325 flow_rule_match_enc_ip(rule, &match);
326 ext->tos = match.key->tos;
327 ext->ttl = match.key->ttl;
328 msk->tos = match.mask->tos;
329 msk->ttl = match.mask->ttl;
/* Compile the UDP-tunnel (VXLAN/geneve) key id.  The 24-bit VNI is
 * shifted into position (NFP_FL_TUN_VNI_OFFSET) and stored big-endian
 * in *@key / *@key_msk when the rule matches on the tunnel key id.
 */
334 nfp_flower_compile_tun_udp_key(__be32 *key, __be32 *key_msk,
335 struct flow_rule *rule)
337 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
338 struct flow_match_enc_keyid match;
341 flow_rule_match_enc_keyid(rule, &match);
/* Convert to host order to shift, then back to big-endian. */
342 vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
343 *key = cpu_to_be32(vni);
344 vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
345 *key_msk = cpu_to_be32(vni);
/* Compile the GRE tunnel key.  The 32-bit key id is copied as-is
 * (already big-endian) and the GRE "key present" flag is set in both
 * the flags key and mask when the rule matches on the tunnel key id.
 */
350 nfp_flower_compile_tun_gre_key(__be32 *key, __be32 *key_msk, __be16 *flags,
351 __be16 *flags_msk, struct flow_rule *rule)
353 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
354 struct flow_match_enc_keyid match;
356 flow_rule_match_enc_keyid(rule, &match);
357 *key = match.key->keyid;
358 *key_msk = match.mask->keyid;
/* Matching on a key id implies the GRE key flag must be present. */
360 *flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
361 *flags_msk = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
/* Compile the IPv4 GRE tunnel match layer: ethertype (fixed to TEB, as
 * NVGRE is the only supported GRE type), outer addresses, outer IP
 * extension fields and the GRE key/flags.
 */
366 nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
367 struct nfp_flower_ipv4_gre_tun *msk,
368 struct flow_rule *rule)
370 memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
371 memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
373 /* NVGRE is the only supported GRE tunnel type */
374 ext->ethertype = cpu_to_be16(ETH_P_TEB);
/* Ethertype is an exact match: mask all bits. */
375 msk->ethertype = cpu_to_be16(~0);
377 nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
378 nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
379 nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
380 &ext->tun_flags, &msk->tun_flags, rule);
/* Compile the IPv4 UDP-based (VXLAN/geneve) tunnel match layer: outer
 * addresses, outer IP extension fields and the shifted VNI tunnel id.
 */
384 nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
385 struct nfp_flower_ipv4_udp_tun *msk,
386 struct flow_rule *rule)
388 memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
389 memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
391 nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
392 nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
393 nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
/* Compile the IPv6 UDP-based (VXLAN/geneve) tunnel match layer: outer
 * addresses, outer IP extension fields and the shifted VNI tunnel id.
 */
397 nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
398 struct nfp_flower_ipv6_udp_tun *msk,
399 struct flow_rule *rule)
401 memset(ext, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
402 memset(msk, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
404 nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
405 nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
406 nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
/* Compile the IPv6 GRE tunnel match layer: ethertype (fixed to TEB, as
 * NVGRE is the only supported GRE type), outer addresses, outer IP
 * extension fields and the GRE key/flags.
 */
410 nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
411 struct nfp_flower_ipv6_gre_tun *msk,
412 struct flow_rule *rule)
414 memset(ext, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
415 memset(msk, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
417 /* NVGRE is the only supported GRE tunnel type */
418 ext->ethertype = cpu_to_be16(ETH_P_TEB);
/* Ethertype is an exact match: mask all bits. */
419 msk->ethertype = cpu_to_be16(~0);
421 nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
422 nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
423 nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
424 &ext->tun_flags, &msk->tun_flags, rule);
/* Top-level match compiler.  Serialises every key layer selected in
 * @key_ls into the flat unmasked_data/mask_data buffers of @nfp_flow,
 * advancing the ext/msk cursors by each layer's struct size as it goes.
 * Layer order must match the firmware's expected key layout: meta/TCI,
 * optional extended meta, in-port, MAC/MPLS, transport ports, IPv4,
 * IPv6, then tunnel layers (GRE or VXLAN/geneve, IPv4 or IPv6) and
 * finally geneve options.  Tunnel destination addresses are also
 * offloaded to the tunnel-neighbour tables (nfp_tunnel_add_ipv[46]_off).
 * NOTE(review): this function continues beyond the visible portion of
 * the file; error-path handling after the compile_port/compile_mac
 * calls is not shown here.
 */
427 int nfp_flower_compile_flow_match(struct nfp_app *app,
428 struct flow_cls_offload *flow,
429 struct nfp_fl_key_ls *key_ls,
430 struct net_device *netdev,
431 struct nfp_fl_payload *nfp_flow,
432 enum nfp_flower_tun_type tun_type,
433 struct netlink_ext_ack *extack)
435 struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
441 port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
/* Clear both buffers to the full pre-computed key size. */
443 memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
444 memset(nfp_flow->mask_data, 0, key_ls->key_size);
/* ext/msk act as write cursors through the two buffers. */
446 ext = nfp_flow->unmasked_data;
447 msk = nfp_flow->mask_data;
449 nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
450 (struct nfp_flower_meta_tci *)msk,
451 rule, key_ls->key_layer);
452 ext += sizeof(struct nfp_flower_meta_tci);
453 msk += sizeof(struct nfp_flower_meta_tci);
455 /* Populate Extended Metadata if Required. */
456 if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
457 nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
458 key_ls->key_layer_two);
459 nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
460 key_ls->key_layer_two);
461 ext += sizeof(struct nfp_flower_ext_meta);
462 msk += sizeof(struct nfp_flower_ext_meta);
465 /* Populate Exact Port data. */
466 err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
467 port_id, false, tun_type, extack);
471 /* Populate Mask Port Data. */
472 err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
473 port_id, true, tun_type, extack);
477 ext += sizeof(struct nfp_flower_in_port);
478 msk += sizeof(struct nfp_flower_in_port);
480 if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
481 err = nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
482 (struct nfp_flower_mac_mpls *)msk,
487 ext += sizeof(struct nfp_flower_mac_mpls);
488 msk += sizeof(struct nfp_flower_mac_mpls);
491 if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
492 nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
493 (struct nfp_flower_tp_ports *)msk,
495 ext += sizeof(struct nfp_flower_tp_ports);
496 msk += sizeof(struct nfp_flower_tp_ports);
499 if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
500 nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
501 (struct nfp_flower_ipv4 *)msk,
503 ext += sizeof(struct nfp_flower_ipv4);
504 msk += sizeof(struct nfp_flower_ipv4);
507 if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
508 nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
509 (struct nfp_flower_ipv6 *)msk,
511 ext += sizeof(struct nfp_flower_ipv6);
512 msk += sizeof(struct nfp_flower_ipv6);
/* GRE tunnel layers: IPv6 or IPv4 variant by key_layer_two flag. */
515 if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
516 if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
517 struct nfp_flower_ipv6_gre_tun *gre_match;
518 struct nfp_ipv6_addr_entry *entry;
519 struct in6_addr *dst;
521 nfp_flower_compile_ipv6_gre_tun((void *)ext,
/* Capture the tunnel destination before advancing the cursor. */
523 gre_match = (struct nfp_flower_ipv6_gre_tun *)ext;
524 dst = &gre_match->ipv6.dst;
525 ext += sizeof(struct nfp_flower_ipv6_gre_tun);
526 msk += sizeof(struct nfp_flower_ipv6_gre_tun);
/* Offload the IPv6 tunnel endpoint; entry is kept on the flow. */
528 entry = nfp_tunnel_add_ipv6_off(app, dst);
532 nfp_flow->nfp_tun_ipv6 = entry;
536 nfp_flower_compile_ipv4_gre_tun((void *)ext,
538 dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
539 ext += sizeof(struct nfp_flower_ipv4_gre_tun);
540 msk += sizeof(struct nfp_flower_ipv4_gre_tun);
542 /* Store the tunnel destination in the rule data.
543 * This must be present and be an exact match.
545 nfp_flow->nfp_tun_ipv4_addr = dst;
546 nfp_tunnel_add_ipv4_off(app, dst);
/* UDP tunnel layers (VXLAN / geneve), again IPv6 or IPv4 variant. */
550 if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
551 key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
552 if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
553 struct nfp_flower_ipv6_udp_tun *udp_match;
554 struct nfp_ipv6_addr_entry *entry;
555 struct in6_addr *dst;
557 nfp_flower_compile_ipv6_udp_tun((void *)ext,
559 udp_match = (struct nfp_flower_ipv6_udp_tun *)ext;
560 dst = &udp_match->ipv6.dst;
561 ext += sizeof(struct nfp_flower_ipv6_udp_tun);
562 msk += sizeof(struct nfp_flower_ipv6_udp_tun);
564 entry = nfp_tunnel_add_ipv6_off(app, dst);
568 nfp_flow->nfp_tun_ipv6 = entry;
572 nfp_flower_compile_ipv4_udp_tun((void *)ext,
574 dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
575 ext += sizeof(struct nfp_flower_ipv4_udp_tun);
576 msk += sizeof(struct nfp_flower_ipv4_udp_tun);
578 /* Store the tunnel destination in the rule data.
579 * This must be present and be an exact match.
581 nfp_flow->nfp_tun_ipv4_addr = dst;
582 nfp_tunnel_add_ipv4_off(app, dst);
/* Geneve options are appended last, after the tunnel layer. */
585 if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
586 err = nfp_flower_compile_geneve_opt(ext, msk, rule);