2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/mlx5/fs.h>
/* One ethtool flow-steering rule: the ethtool spec it was created from,
 * the installed HW rule handle, and the flow table holding it.
 * NOTE(review): struct appears truncated here — closing brace not visible.
 */
36 struct mlx5e_ethtool_rule {
37 struct list_head list;
38 struct ethtool_rx_flow_spec flow_spec;
39 struct mlx5_flow_handle *rule;
40 struct mlx5e_ethtool_table *eth_ft;
/* Drop one rule reference on an ethtool flow table; destroy the HW
 * table when the last rule using it is gone.
 */
43 static void put_flow_table(struct mlx5e_ethtool_table *eth_ft)
45 if (!--eth_ft->num_rules) {
46 mlx5_destroy_flow_table(eth_ft->ft);
/* Priority layout: L3/L4 tables occupy the first ETHTOOL_NUM_L3_L4_FTS
 * priorities, L2 tables follow them.
 */
51 #define MLX5E_ETHTOOL_L3_L4_PRIO 0
52 #define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS)
53 #define MLX5E_ETHTOOL_NUM_ENTRIES 64000
54 #define MLX5E_ETHTOOL_NUM_GROUPS 10
/* Pick (and lazily create) the flow table for this rule. Rules matching
 * more tuples go to lower priorities: prio = max_tuples - num_tuples.
 * NOTE(review): the switch-case labels and several statements are
 * missing from this extraction — the visible arms are the L3/L4 flow
 * types and the L2 (ether) fallback; default returns -EINVAL.
 */
55 static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
56 struct ethtool_rx_flow_spec *fs,
59 struct mlx5e_ethtool_table *eth_ft;
60 struct mlx5_flow_namespace *ns;
61 struct mlx5_flow_table *ft;
66 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
71 max_tuples = ETHTOOL_NUM_L3_L4_FTS;
72 prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
73 eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
77 max_tuples = ETHTOOL_NUM_L3_L4_FTS;
78 prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
79 eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
82 max_tuples = ETHTOOL_NUM_L2_FTS;
83 prio = max_tuples - num_tuples;
84 eth_ft = &priv->fs.ethtool.l2_ft[prio];
85 prio += MLX5E_ETHTOOL_L2_PRIO;
88 return ERR_PTR(-EINVAL);
/* Need the dedicated ETHTOOL steering namespace from the device. */
95 ns = mlx5_get_flow_namespace(priv->mdev,
96 MLX5_FLOW_NAMESPACE_ETHTOOL);
98 return ERR_PTR(-EOPNOTSUPP);
/* Clamp table size to what the HW cap advertises. */
100 table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
101 flow_table_properties_nic_receive.log_max_ft_size)),
102 MLX5E_ETHTOOL_NUM_ENTRIES);
103 ft = mlx5_create_auto_grouped_flow_table(ns, prio,
105 MLX5E_ETHTOOL_NUM_GROUPS, 0, 0);
113 static void mask_spec(u8 *mask, u8 *val, size_t size)
117 for (i = 0; i < size; i++, mask++, val++)
118 *((u8 *)val) = *((u8 *)mask) & *((u8 *)val);
/* Shorthand for setting/addressing fields of the outer-headers
 * (fte_match_set_lyr_2_4) section of a flow-table entry match.
 */
121 #define MLX5E_FTE_SET(header_p, fld, v) \
122 MLX5_SET(fte_match_set_lyr_2_4, header_p, fld, v)
124 #define MLX5E_FTE_ADDR_OF(header_p, fld) \
125 MLX5_ADDR_OF(fte_match_set_lyr_2_4, header_p, fld)
/* Program IPv4 src/dst address match (value + mask) into the outer
 * headers, and pin ethertype to IPv4 with a full mask.
 * NOTE(review): guards (e.g. skipping zero masks) appear truncated from
 * this extraction — confirm against the full source.
 */
128 set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m,
129 __be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
132 memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4),
133 &ip4src_v, sizeof(ip4src_v));
134 memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
135 &ip4src_m, sizeof(ip4src_m));
138 memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
139 &ip4dst_v, sizeof(ip4dst_v));
140 memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
141 &ip4dst_m, sizeof(ip4dst_m));
/* Match only IPv4 frames. */
144 MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
145 MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IP);
/* Program IPv6 src/dst address match into the outer headers; addresses
 * with an all-zero mask are skipped. Pins ethertype to IPv6.
 */
149 set_ip6(void *headers_c, void *headers_v, __be32 ip6src_m[4],
150 __be32 ip6src_v[4], __be32 ip6dst_m[4], __be32 ip6dst_v[4])
152 u8 ip6_sz = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);
154 if (!ipv6_addr_any((struct in6_addr *)ip6src_m)) {
155 memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv6_layout.ipv6),
157 memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv6_layout.ipv6),
160 if (!ipv6_addr_any((struct in6_addr *)ip6dst_m)) {
161 memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
163 memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
/* Match only IPv6 frames. */
167 MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
168 MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IPV6);
/* Program TCP source/destination port match and force ip_protocol to
 * TCP. Ports arrive big-endian from ethtool; ntohs() converts them to
 * the host order that MLX5_SET expects.
 */
172 set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
173 __be16 pdst_m, __be16 pdst_v)
176 MLX5E_FTE_SET(headers_c, tcp_sport, ntohs(psrc_m));
177 MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v));
180 MLX5E_FTE_SET(headers_c, tcp_dport, ntohs(pdst_m));
181 MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v));
184 MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
185 MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_TCP);
/* Program UDP source/destination port match and force ip_protocol to
 * UDP; mirror of set_tcp() for the UDP header fields.
 */
189 set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
190 __be16 pdst_m, __be16 pdst_v)
193 MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_m));
194 MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
198 MLX5E_FTE_SET(headers_c, udp_dport, ntohs(pdst_m));
199 MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v));
202 MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
203 MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_UDP);
/* Translate an ethtool TCP-over-IPv4 spec into match headers:
 * IPv4 addresses then TCP ports.
 */
207 parse_tcp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
209 struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
210 struct ethtool_tcpip4_spec *l4_val = &fs->h_u.tcp_ip4_spec;
212 set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
213 l4_mask->ip4dst, l4_val->ip4dst);
215 set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
216 l4_mask->pdst, l4_val->pdst);
/* Translate an ethtool UDP-over-IPv4 spec into match headers:
 * IPv4 addresses then UDP ports.
 */
220 parse_udp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
222 struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.udp_ip4_spec;
223 struct ethtool_tcpip4_spec *l4_val = &fs->h_u.udp_ip4_spec;
225 set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
226 l4_mask->ip4dst, l4_val->ip4dst);
228 set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
229 l4_mask->pdst, l4_val->pdst);
/* Translate a plain IPv4 user spec: addresses, plus the L4 protocol
 * field if its mask is set.
 */
233 parse_ip4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
235 struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
236 struct ethtool_usrip4_spec *l3_val = &fs->h_u.usr_ip4_spec;
238 set_ip4(headers_c, headers_v, l3_mask->ip4src, l3_val->ip4src,
239 l3_mask->ip4dst, l3_val->ip4dst);
241 if (l3_mask->proto) {
242 MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->proto);
243 MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->proto);
/* Translate a plain IPv6 user spec: addresses, plus the L4 protocol
 * field if its mask is set.
 */
248 parse_ip6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
250 struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
251 struct ethtool_usrip6_spec *l3_val = &fs->h_u.usr_ip6_spec;
253 set_ip6(headers_c, headers_v, l3_mask->ip6src,
254 l3_val->ip6src, l3_mask->ip6dst, l3_val->ip6dst);
256 if (l3_mask->l4_proto) {
257 MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->l4_proto);
258 MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->l4_proto);
/* Translate an ethtool TCP-over-IPv6 spec into match headers:
 * IPv6 addresses then TCP ports.
 */
263 parse_tcp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
265 struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
266 struct ethtool_tcpip6_spec *l4_val = &fs->h_u.tcp_ip6_spec;
268 set_ip6(headers_c, headers_v, l4_mask->ip6src,
269 l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);
271 set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
272 l4_mask->pdst, l4_val->pdst);
/* Translate an ethtool UDP-over-IPv6 spec into match headers:
 * IPv6 addresses then UDP ports.
 */
276 parse_udp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
278 struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.udp_ip6_spec;
279 struct ethtool_tcpip6_spec *l4_val = &fs->h_u.udp_ip6_spec;
281 set_ip6(headers_c, headers_v, l4_mask->ip6src,
282 l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);
284 set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
285 l4_mask->pdst, l4_val->pdst);
/* Translate an ethtool L2 (ether) spec: normalize the value with the
 * mask, then program src/dst MAC and ethertype match.
 */
289 parse_ether(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
291 struct ethhdr *eth_mask = &fs->m_u.ether_spec;
292 struct ethhdr *eth_val = &fs->h_u.ether_spec;
/* Pre-mask the value so value == value & mask for the HW rule. */
294 mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
295 ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, smac_47_16), eth_mask->h_source);
296 ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, smac_47_16), eth_val->h_source);
297 ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), eth_mask->h_dest);
298 ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), eth_val->h_dest);
299 MLX5E_FTE_SET(headers_c, ethertype, ntohs(eth_mask->h_proto));
300 MLX5E_FTE_SET(headers_v, ethertype, ntohs(eth_val->h_proto));
/* Match on the presence of a customer VLAN tag and on the 12-bit VID
 * extracted from @vlan_tci (big-endian on the wire).
 */
304 set_cvlan(void *headers_c, void *headers_v, __be16 vlan_tci)
306 MLX5E_FTE_SET(headers_c, cvlan_tag, 1);
307 MLX5E_FTE_SET(headers_v, cvlan_tag, 1);
308 MLX5E_FTE_SET(headers_c, first_vid, 0xfff);
309 MLX5E_FTE_SET(headers_v, first_vid, ntohs(vlan_tci));
/* Program destination-MAC match (mask into headers_c, value into
 * headers_v).
 */
313 set_dmac(void *headers_c, void *headers_v,
314 unsigned char m_dest[ETH_ALEN], unsigned char v_dest[ETH_ALEN])
316 ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), m_dest);
317 ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), v_dest);
/* Build the flow-table match (criteria + value) for an ethtool spec by
 * dispatching on flow type, then apply the FLOW_EXT VLAN and
 * FLOW_MAC_EXT destination-MAC extensions.
 * NOTE(review): the switch/case labels for each flow type are missing
 * from this extraction — only the per-type parse_* calls are visible.
 */
320 static int set_flow_attrs(u32 *match_c, u32 *match_v,
321 struct ethtool_rx_flow_spec *fs)
323 void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
325 void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
327 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
331 parse_tcp4(outer_headers_c, outer_headers_v, fs);
334 parse_udp4(outer_headers_c, outer_headers_v, fs);
337 parse_ip4(outer_headers_c, outer_headers_v, fs);
340 parse_tcp6(outer_headers_c, outer_headers_v, fs);
343 parse_udp6(outer_headers_c, outer_headers_v, fs);
346 parse_ip6(outer_headers_c, outer_headers_v, fs);
349 parse_ether(outer_headers_c, outer_headers_v, fs);
/* Only add a VLAN match when the user actually masked VID bits. */
355 if ((fs->flow_type & FLOW_EXT) &&
356 (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)))
357 set_cvlan(outer_headers_c, outer_headers_v, fs->h_ext.vlan_tci);
359 if (fs->flow_type & FLOW_MAC_EXT &&
360 !is_zero_ether_addr(fs->m_ext.h_dest)) {
361 mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
362 set_dmac(outer_headers_c, outer_headers_v, fs->m_ext.h_dest,
/* Insert @rule into the per-priv rules list, keeping the list sorted
 * by ascending ethtool location, and bump the total rule count.
 */
369 static void add_rule_to_list(struct mlx5e_priv *priv,
370 struct mlx5e_ethtool_rule *rule)
372 struct mlx5e_ethtool_rule *iter;
373 struct list_head *head = &priv->fs.ethtool.rules;
/* Find the first entry with a larger location; insert before it. */
375 list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
376 if (iter->flow_spec.location > rule->flow_spec.location)
380 priv->fs.ethtool.tot_num_rules++;
381 list_add(&rule->list, head);
/* Return true when the outer-headers portion of the match criteria is
 * all zeros (i.e. the rule matches on nothing in the outer headers).
 * Implemented by comparing the buffer against itself shifted by one,
 * after checking the first byte.
 */
384 static bool outer_header_zero(u32 *match_criteria)
386 int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
387 char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
390 return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
/* Build a match spec from @fs and install the HW rule into @ft.
 * A ring_cookie of RX_CLS_FLOW_DISC becomes a DROP action; otherwise
 * the flow is forwarded to the direct TIR of the selected ring.
 * Returns the rule handle or ERR_PTR() on failure.
 * NOTE(review): error-path labels/frees are truncated from this
 * extraction — confirm cleanup of @spec and @dst in the full source.
 */
395 static struct mlx5_flow_handle *
396 add_ethtool_flow_rule(struct mlx5e_priv *priv,
397 struct mlx5_flow_table *ft,
398 struct ethtool_rx_flow_spec *fs)
400 struct mlx5_flow_destination *dst = NULL;
401 struct mlx5_flow_act flow_act = {0};
402 struct mlx5_flow_spec *spec;
403 struct mlx5_flow_handle *rule;
406 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
408 return ERR_PTR(-ENOMEM);
409 err = set_flow_attrs(spec->match_criteria, spec->match_value,
414 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
415 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
417 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
423 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
424 dst->tir_num = priv->direct_tir[fs->ring_cookie].tirn;
425 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
/* Only enable outer-headers criteria if something is actually set. */
428 spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
429 flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
430 rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
433 netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
440 return err ? ERR_PTR(err) : rule;
443 static void del_ethtool_rule(struct mlx5e_priv *priv,
444 struct mlx5e_ethtool_rule *eth_rule)
447 mlx5_del_flow_rules(eth_rule->rule);
448 list_del(ð_rule->list);
449 priv->fs.ethtool.tot_num_rules--;
450 put_flow_table(eth_rule->eth_ft);
/* Linear search of the rules list for an entry whose ethtool location
 * equals @location; returns NULL if not found (return path truncated
 * from this extraction).
 */
454 static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
457 struct mlx5e_ethtool_rule *iter;
459 list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
460 if (iter->flow_spec.location == location)
/* Replace-semantics allocator: if a rule already exists at @location it
 * is deleted first, then a fresh zeroed entry is allocated and linked
 * into the sorted rules list. Returns ERR_PTR(-ENOMEM) on allocation
 * failure.
 */
466 static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
469 struct mlx5e_ethtool_rule *eth_rule;
471 eth_rule = find_ethtool_rule(priv, location);
473 del_ethtool_rule(priv, eth_rule);
475 eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
477 return ERR_PTR(-ENOMEM);
479 add_rule_to_list(priv, eth_rule);
/* Validation helpers: ethtool masks are only accepted as "don't care"
 * (all zeros) or "exact match" (all ones) for most fields.
 */
483 #define MAX_NUM_OF_ETHTOOL_RULES BIT(10)
485 #define all_ones(field) (field == (__force typeof(field))-1)
486 #define all_zeros_or_all_ones(field) \
487 ((field) == 0 || (field) == (__force typeof(field))-1)
/* Count match tuples in an L2 spec (dst MAC, src MAC, ethertype);
 * the per-field tuple increments are truncated from this extraction.
 */
489 static int validate_ethter(struct ethtool_rx_flow_spec *fs)
491 struct ethhdr *eth_mask = &fs->m_u.ether_spec;
494 if (!is_zero_ether_addr(eth_mask->h_dest))
496 if (!is_zero_ether_addr(eth_mask->h_source))
498 if (eth_mask->h_proto)
/* Validate a TCP/UDP-over-IPv4 spec and count its match tuples.
 * NOTE(review): the validation body (mask checks, tuple counting) is
 * almost entirely truncated from this extraction.
 */
503 static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs)
505 struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
519 /* Flow is TCP/UDP */
/* Validate a plain IPv4 user spec: reject unsupported l4_4_bytes/tos
 * masks and non-IPv4 ip_ver; tuple counting is truncated from this
 * extraction.
 */
523 static int validate_ip4(struct ethtool_rx_flow_spec *fs)
525 struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
528 if (l3_mask->l4_4_bytes || l3_mask->tos ||
529 fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
/* Validate a plain IPv6 user spec: reject unsupported l4_4_bytes/tclass
 * masks; each non-empty address / l4_proto mask contributes a tuple
 * (increments truncated from this extraction).
 */
541 static int validate_ip6(struct ethtool_rx_flow_spec *fs)
543 struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
546 if (l3_mask->l4_4_bytes || l3_mask->tclass)
548 if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6src))
551 if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6dst))
553 if (l3_mask->l4_proto)
/* Validate a TCP/UDP-over-IPv6 spec and count its match tuples; each
 * non-empty address mask contributes a tuple (port/mask checks are
 * truncated from this extraction).
 */
559 static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs)
561 struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
567 if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6src))
570 if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst))
577 /* Flow is TCP/UDP */
/* Validate the FLOW_EXT VLAN extension: only an exact-VID mask is
 * supported (no vlan_etype matching), and the VID value must be in
 * range.
 */
581 static int validate_vlan(struct ethtool_rx_flow_spec *fs)
583 if (fs->m_ext.vlan_etype ||
584 fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
587 if (fs->m_ext.vlan_tci &&
588 (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID))
/* Validate an ethtool flow spec and return the number of match tuples
 * it uses (negative errno on invalid input). Checks the location and
 * ring bounds, then dispatches to the per-type validator, and finally
 * the FLOW_EXT / FLOW_MAC_EXT extensions.
 * NOTE(review): switch-case labels and error propagation lines are
 * truncated from this extraction.
 */
594 static int validate_flow(struct mlx5e_priv *priv,
595 struct ethtool_rx_flow_spec *fs)
600 if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
/* Ring must exist unless the rule is an explicit drop. */
603 if (fs->ring_cookie >= priv->channels.params.num_channels &&
604 fs->ring_cookie != RX_CLS_FLOW_DISC)
607 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
609 num_tuples += validate_ethter(fs);
613 ret = validate_tcpudp4(fs);
619 ret = validate_ip4(fs);
626 ret = validate_tcpudp6(fs);
632 ret = validate_ip6(fs);
640 if ((fs->flow_type & FLOW_EXT)) {
641 ret = validate_vlan(fs);
647 if (fs->flow_type & FLOW_MAC_EXT &&
648 !is_zero_ether_addr(fs->m_ext.h_dest))
/* Top-level ETHTOOL_SRXCLSRLINS handler: validate the spec, acquire
 * (or create) the right flow table, replace any rule already at this
 * location, install the HW rule, and record the handle. On failure the
 * table reference and the list entry are released via the error path.
 */
655 mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
656 struct ethtool_rx_flow_spec *fs)
658 struct mlx5e_ethtool_table *eth_ft;
659 struct mlx5e_ethtool_rule *eth_rule;
660 struct mlx5_flow_handle *rule;
664 num_tuples = validate_flow(priv, fs);
665 if (num_tuples <= 0) {
666 netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
667 __func__, num_tuples);
671 eth_ft = get_flow_table(priv, fs, num_tuples);
673 return PTR_ERR(eth_ft);
675 eth_rule = get_ethtool_rule(priv, fs->location);
676 if (IS_ERR(eth_rule)) {
/* Undo the table reference taken above before bailing out. */
677 put_flow_table(eth_ft);
678 return PTR_ERR(eth_rule);
681 eth_rule->flow_spec = *fs;
682 eth_rule->eth_ft = eth_ft;
685 goto del_ethtool_rule;
687 rule = add_ethtool_flow_rule(priv, eth_ft->ft, fs);
690 goto del_ethtool_rule;
693 eth_rule->rule = rule;
698 del_ethtool_rule(priv, eth_rule);
/* Top-level ETHTOOL_SRXCLSRLDEL handler: bounds-check the location,
 * look up the rule, and delete it (not-found handling truncated from
 * this extraction).
 */
704 mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location)
706 struct mlx5e_ethtool_rule *eth_rule;
709 if (location >= MAX_NUM_OF_ETHTOOL_RULES)
712 eth_rule = find_ethtool_rule(priv, location);
718 del_ethtool_rule(priv, eth_rule);
/* ETHTOOL_GRXCLSRULE: copy the stored flow spec at @location into
 * info->fs; location is bounds-checked first.
 */
724 mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
725 struct ethtool_rxnfc *info, int location)
727 struct mlx5e_ethtool_rule *eth_rule;
729 if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
732 list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
733 if (eth_rule->flow_spec.location == location) {
734 info->fs = eth_rule->flow_spec;
/* ETHTOOL_GRXCLSRLALL: walk locations, collecting those that hold a
 * rule into @rule_locs until info->rule_cnt entries are found; -ENOENT
 * from the per-location lookup just means "empty slot, keep going".
 */
743 mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
744 struct ethtool_rxnfc *info, u32 *rule_locs)
750 info->data = MAX_NUM_OF_ETHTOOL_RULES;
751 while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
752 err = mlx5e_ethtool_get_flow(priv, info, location);
754 rule_locs[idx++] = location;
/* Tear down every installed ethtool rule; _safe iteration because
 * del_ethtool_rule() unlinks (and frees) each entry.
 */
760 void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
762 struct mlx5e_ethtool_rule *iter;
763 struct mlx5e_ethtool_rule *temp;
765 list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
766 del_ethtool_rule(priv, iter);
/* Initialize the (empty) ethtool steering rules list. */
769 void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
771 INIT_LIST_HEAD(&priv->fs.ethtool.rules);
/* Map an ethtool flow type to the driver's indirect-TIR traffic type;
 * MLX5E_NUM_INDIR_TIRS acts as the "unsupported" sentinel.
 * NOTE(review): the case labels are missing from this extraction; the
 * visible returns follow the TCP/UDP/AH/ESP v4+v6 then plain-IP order.
 */
774 static enum mlx5e_traffic_types flow_type_to_traffic_type(u32 flow_type)
778 return MLX5E_TT_IPV4_TCP;
780 return MLX5E_TT_IPV6_TCP;
782 return MLX5E_TT_IPV4_UDP;
784 return MLX5E_TT_IPV6_UDP;
786 return MLX5E_TT_IPV4_IPSEC_AH;
788 return MLX5E_TT_IPV6_IPSEC_AH;
790 return MLX5E_TT_IPV4_IPSEC_ESP;
792 return MLX5E_TT_IPV6_IPSEC_ESP;
794 return MLX5E_TT_IPV4;
796 return MLX5E_TT_IPV6;
798 return MLX5E_NUM_INDIR_TIRS;
/* ETHTOOL_SRXFH handler: translate the requested RXH_* hash fields into
 * MLX5_HASH_FIELD_SEL_* bits for the traffic type and, under the state
 * lock, update the stored RSS params and re-program the TIRs if the
 * selection actually changed.
 */
802 static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
803 struct ethtool_rxnfc *nfc)
805 int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
806 enum mlx5e_traffic_types tt;
807 u8 rx_hash_field = 0;
810 tt = flow_type_to_traffic_type(nfc->flow_type);
811 if (tt == MLX5E_NUM_INDIR_TIRS)
814 /* RSS does not support anything other than hashing to queues
815 * on src IP, dest IP, TCP/UDP src port and TCP/UDP dest
818 if (nfc->flow_type != TCP_V4_FLOW &&
819 nfc->flow_type != TCP_V6_FLOW &&
820 nfc->flow_type != UDP_V4_FLOW &&
821 nfc->flow_type != UDP_V6_FLOW)
824 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
825 RXH_L4_B_0_1 | RXH_L4_B_2_3))
828 if (nfc->data & RXH_IP_SRC)
829 rx_hash_field |= MLX5_HASH_FIELD_SEL_SRC_IP;
830 if (nfc->data & RXH_IP_DST)
831 rx_hash_field |= MLX5_HASH_FIELD_SEL_DST_IP;
832 if (nfc->data & RXH_L4_B_0_1)
833 rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_SPORT;
834 if (nfc->data & RXH_L4_B_2_3)
835 rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_DPORT;
837 in = kvzalloc(inlen, GFP_KERNEL);
841 mutex_lock(&priv->state_lock);
/* No-op if the hash-field selection is unchanged. */
843 if (rx_hash_field == priv->rss_params.rx_hash_fields[tt])
846 priv->rss_params.rx_hash_fields[tt] = rx_hash_field;
847 mlx5e_modify_tirs_hash(priv, in, inlen);
850 mutex_unlock(&priv->state_lock);
/* ETHTOOL_GRXFH handler: report the stored hash-field selection for
 * the traffic type as RXH_* bits in nfc->data.
 */
855 static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
856 struct ethtool_rxnfc *nfc)
858 enum mlx5e_traffic_types tt;
861 tt = flow_type_to_traffic_type(nfc->flow_type);
862 if (tt == MLX5E_NUM_INDIR_TIRS)
865 hash_field = priv->rss_params.rx_hash_fields[tt];
868 if (hash_field & MLX5_HASH_FIELD_SEL_SRC_IP)
869 nfc->data |= RXH_IP_SRC;
870 if (hash_field & MLX5_HASH_FIELD_SEL_DST_IP)
871 nfc->data |= RXH_IP_DST;
872 if (hash_field & MLX5_HASH_FIELD_SEL_L4_SPORT)
873 nfc->data |= RXH_L4_B_0_1;
874 if (hash_field & MLX5_HASH_FIELD_SEL_L4_DPORT)
875 nfc->data |= RXH_L4_B_2_3;
/* ethtool set_rxnfc entry point: dispatch rule insert/delete and RSS
 * hash-option commands (switch header/default truncated from this
 * extraction).
 */
880 int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
883 struct mlx5e_priv *priv = netdev_priv(dev);
886 case ETHTOOL_SRXCLSRLINS:
887 err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
889 case ETHTOOL_SRXCLSRLDEL:
890 err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
893 err = mlx5e_set_rss_hash_opt(priv, cmd);
/* ethtool get_rxnfc entry point: report ring count, rule count,
 * a single rule, all rule locations, or the RSS hash options
 * (switch header/breaks truncated from this extraction).
 */
903 int mlx5e_get_rxnfc(struct net_device *dev,
904 struct ethtool_rxnfc *info, u32 *rule_locs)
906 struct mlx5e_priv *priv = netdev_priv(dev);
910 case ETHTOOL_GRXRINGS:
911 info->data = priv->channels.params.num_channels;
913 case ETHTOOL_GRXCLSRLCNT:
914 info->rule_cnt = priv->fs.ethtool.tot_num_rules;
916 case ETHTOOL_GRXCLSRULE:
917 err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
919 case ETHTOOL_GRXCLSRLALL:
920 err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
923 err = mlx5e_get_rss_hash_opt(priv, info);