/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <generated/utsrelease.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netevent.h>

#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "en/tc_tun.h"
#include "fs_core.h"
#include "lib/port_tun.h"

#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
	max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

struct mlx5e_rep_indr_block_priv {
	struct net_device *netdev;
	struct mlx5e_rep_priv *rpriv;

	struct list_head list;
};

static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
					    struct net_device *netdev);

static void mlx5e_rep_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%04d (%.16s)",
		 fw_rev_maj(mdev), fw_rev_min(mdev),
		 fw_rev_sub(mdev), mdev->board_id);
}

static void mlx5e_uplink_rep_get_drvinfo(struct net_device *dev,
					 struct ethtool_drvinfo *drvinfo)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_rep_get_drvinfo(dev, drvinfo);
	strlcpy(drvinfo->bus_info, pci_name(priv->mdev->pdev),
		sizeof(drvinfo->bus_info));
}

static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

static const struct counter_desc vport_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)

static void mlx5e_rep_get_strings(struct net_device *dev,
				  u32 stringset, uint8_t *data)
{
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
			strcpy(data + (i * ETH_GSTRING_LEN),
			       sw_rep_stats_desc[i].format);
		for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
			strcpy(data + (i * ETH_GSTRING_LEN),
			       vport_rep_stats_desc[j].format);
		break;
	}
}

static void mlx5e_vf_rep_update_hw_counters(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct rtnl_link_stats64 *vport_stats;
	struct ifla_vf_stats vf_stats;
	int err;

	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
	if (err) {
		pr_warn("vport %d error %d reading stats\n", rep->vport, err);
		return;
	}

	vport_stats = &priv->stats.vf_vport;
	/* flip tx/rx as we are reporting the counters for the switch vport */
	vport_stats->rx_packets = vf_stats.tx_packets;
	vport_stats->rx_bytes = vf_stats.tx_bytes;
	vport_stats->tx_packets = vf_stats.rx_packets;
	vport_stats->tx_bytes = vf_stats.rx_bytes;
}

static void mlx5e_uplink_rep_update_hw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct rtnl_link_stats64 *vport_stats;

	mlx5e_grp_802_3_update_stats(priv);

	vport_stats = &priv->stats.vf_vport;

	vport_stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
	vport_stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
	vport_stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
	vport_stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
}

static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	if (rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_uplink_rep_update_hw_counters(priv);
	else
		mlx5e_vf_rep_update_hw_counters(priv);
}

static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct rtnl_link_stats64 stats64 = {};

	memset(s, 0, sizeof(*s));
	mlx5e_fold_sw_stats64(priv, &stats64);

	s->rx_packets = stats64.rx_packets;
	s->rx_bytes = stats64.rx_bytes;
	s->tx_packets = stats64.tx_packets;
	s->tx_bytes = stats64.tx_bytes;
	s->tx_queue_dropped = stats64.tx_dropped;
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats, u64 *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int i, j;

	if (!data)
		return;

	mutex_lock(&priv->state_lock);
	mlx5e_rep_update_sw_counters(priv);
	mlx5e_rep_update_hw_counters(priv);
	mutex_unlock(&priv->state_lock);

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
					       sw_rep_stats_desc, i);

	for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
					       vport_rep_stats_desc, j);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS;
	default:
		return -EOPNOTSUPP;
	}
}

static void mlx5e_rep_get_ringparam(struct net_device *dev,
				    struct ethtool_ringparam *param)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_ringparam(priv, param);
}

static int mlx5e_rep_set_ringparam(struct net_device *dev,
				   struct ethtool_ringparam *param)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return mlx5e_ethtool_set_ringparam(priv, param);
}

static int mlx5e_replace_rep_vport_rx_rule(struct mlx5e_priv *priv,
					   struct mlx5_flow_destination *dest)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;

	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
						      rep->vport,
						      dest);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);

	mlx5_del_flow_rules(rpriv->vport_rx_rule);
	rpriv->vport_rx_rule = flow_rule;
	return 0;
}

static void mlx5e_rep_get_channels(struct net_device *dev,
				   struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_channels(priv, ch);
}

static int mlx5e_rep_set_channels(struct net_device *dev,
				  struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	u16 curr_channels_amount = priv->channels.params.num_channels;
	u32 new_channels_amount = ch->combined_count;
	struct mlx5_flow_destination new_dest;
	int err = 0;

	err = mlx5e_ethtool_set_channels(priv, ch);
	if (err)
		return err;

	if (curr_channels_amount == 1 && new_channels_amount > 1) {
		new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		new_dest.ft = priv->fs.ttc.ft.t;
	} else if (new_channels_amount == 1 && curr_channels_amount > 1) {
		new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		new_dest.tir_num = priv->direct_tir[0].tirn;
	} else {
		return 0;
	}

	err = mlx5e_replace_rep_vport_rx_rule(priv, &new_dest);
	if (err)
		netdev_warn(priv->netdev, "Failed to update vport rx rule, when going from (%d) channels to (%d) channels\n",
			    curr_channels_amount, new_channels_amount);

	return err;
}

static int mlx5e_rep_get_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_coalesce(priv, coal);
}

static int mlx5e_rep_set_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_coalesce(priv, coal);
}

static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_rxfh_key_size(priv);
}

static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_rxfh_indir_size(priv);
}

static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
					    struct ethtool_pauseparam *pauseparam)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5e_ethtool_get_pauseparam(priv, pauseparam);
}

static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev,
					   struct ethtool_pauseparam *pauseparam)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_pauseparam(priv, pauseparam);
}

static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev,
					       struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
}

static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
					       const struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
}

static const struct ethtool_ops mlx5e_vf_rep_ethtool_ops = {
	.get_drvinfo = mlx5e_rep_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = mlx5e_rep_get_strings,
	.get_sset_count = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam = mlx5e_rep_get_ringparam,
	.set_ringparam = mlx5e_rep_set_ringparam,
	.get_channels = mlx5e_rep_get_channels,
	.set_channels = mlx5e_rep_set_channels,
	.get_coalesce = mlx5e_rep_get_coalesce,
	.set_coalesce = mlx5e_rep_set_coalesce,
	.get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};

static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
	.get_drvinfo = mlx5e_uplink_rep_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = mlx5e_rep_get_strings,
	.get_sset_count = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam = mlx5e_rep_get_ringparam,
	.set_ringparam = mlx5e_rep_set_ringparam,
	.get_channels = mlx5e_rep_get_channels,
	.set_channels = mlx5e_rep_set_channels,
	.get_coalesce = mlx5e_rep_get_coalesce,
	.set_coalesce = mlx5e_rep_set_coalesce,
	.get_link_ksettings = mlx5e_uplink_rep_get_link_ksettings,
	.set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
	.get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
	.get_pauseparam = mlx5e_uplink_rep_get_pauseparam,
	.set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
};

static int mlx5e_rep_get_port_parent_id(struct net_device *dev,
					struct netdev_phys_item_id *ppid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct net_device *uplink_upper = NULL;
	struct mlx5e_priv *uplink_priv = NULL;
	struct net_device *uplink_dev;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
	if (uplink_dev) {
		uplink_upper = netdev_master_upper_dev_get(uplink_dev);
		uplink_priv = netdev_priv(uplink_dev);
	}

	ppid->id_len = ETH_ALEN;
	if (uplink_upper && mlx5_lag_is_sriov(uplink_priv->mdev)) {
		ether_addr_copy(ppid->id, uplink_upper->dev_addr);
	} else {
		struct mlx5e_rep_priv *rpriv = priv->ppriv;
		struct mlx5_eswitch_rep *rep = rpriv->rep;

		ether_addr_copy(ppid->id, rep->hw_id);
	}

	return 0;
}

static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_sq *rep_sq, *tmp;
	struct mlx5e_rep_priv *rpriv;

	if (esw->mode != SRIOV_OFFLOADS)
		return;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
		list_del(&rep_sq->list);
		kfree(rep_sq);
	}
}

static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u32 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
	int err;
	int i;

	if (esw->mode != SRIOV_OFFLOADS)
		return 0;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	for (i = 0; i < sqns_num; i++) {
		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
		if (!rep_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								rep->vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(rep_sq);
			goto out_err;
		}
		rep_sq->send_to_vport_rule = flow_rule;
		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
	}
	return 0;

out_err:
	mlx5e_sqs2vport_stop(esw, rep);
	return err;
}

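/* Install eswitch "send to vport" rules for all of this representor's SQs so
 * that traffic transmitted on the representor netdev is delivered to the
 * vport it represents.
 */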
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5e_channel *c;
	int n, tc, num_sqs = 0;
	int err = -ENOMEM;
	u32 *sqs;

	sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
	if (!sqs)
		goto out;

	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;
	}

	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
	kfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}

void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5e_sqs2vport_stop(esw, rep);
}

static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
{
#if IS_ENABLED(CONFIG_IPV6)
	unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms,
						DELAY_PROBE_TIME);
#else
	unsigned long ipv6_interval = ~0UL;
#endif
	unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
						DELAY_PROBE_TIME);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);

	rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
	mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
}

void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

	mlx5_fc_queue_stats_work(priv->mdev,
				 &neigh_update->neigh_stats_work,
				 neigh_update->min_interval);
}

static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
	struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
						    neigh_update.neigh_stats_work.work);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe;

	rtnl_lock();
	if (!list_empty(&rpriv->neigh_update.neigh_list))
		mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list)
		mlx5e_tc_update_neigh_used_value(nhe);

	rtnl_unlock();
}

static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
{
	refcount_inc(&nhe->refcnt);
}

static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
{
	if (refcount_dec_and_test(&nhe->refcnt))
		kfree(nhe);
}

static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
				   struct mlx5e_encap_entry *e,
				   bool neigh_connected,
				   unsigned char ha[ETH_ALEN])
{
	struct ethhdr *eth = (struct ethhdr *)e->encap_header;

	ASSERT_RTNL();

	if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
	    (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
		mlx5e_tc_encap_flows_del(priv, e);

	if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
		ether_addr_copy(e->h_dest, ha);
		ether_addr_copy(eth->h_dest, ha);
		/* Update the encap source mac, in case we delete the flows
		 * when the encap source mac changes.
		 */
		ether_addr_copy(eth->h_source, e->route_dev->dev_addr);

		mlx5e_tc_encap_flows_add(priv, e);
	}
}

static void mlx5e_rep_neigh_update(struct work_struct *work)
{
	struct mlx5e_neigh_hash_entry *nhe =
		container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
	struct neighbour *n = nhe->n;
	struct mlx5e_encap_entry *e;
	unsigned char ha[ETH_ALEN];
	struct mlx5e_priv *priv;
	bool neigh_connected;
	bool encap_connected;
	u8 nud_state, dead;

	rtnl_lock();

	/* If these parameters are changed after we release the lock,
	 * we'll receive another event letting us know about it.
	 * We use this lock to avoid inconsistency between the neigh validity
	 * and its hw address.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	neigh_connected = (nud_state & NUD_VALID) && !dead;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
		priv = netdev_priv(e->out_dev);

		if (encap_connected != neigh_connected ||
		    !ether_addr_equal(e->h_dest, ha))
			mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
	}
	mlx5e_rep_neigh_entry_release(nhe);
	rtnl_unlock();
	neigh_release(n);
}

static struct mlx5e_rep_indr_block_priv *
mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
				 struct net_device *netdev)
{
	struct mlx5e_rep_indr_block_priv *cb_priv;

	/* All callback list access should be protected by RTNL. */
	ASSERT_RTNL();

	list_for_each_entry(cb_priv,
			    &rpriv->uplink_priv.tc_indr_block_priv_list,
			    list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}

static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_rep_indr_block_priv *cb_priv, *temp;
	struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;

	list_for_each_entry_safe(cb_priv, temp, head, list) {
		mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev);
		kfree(cb_priv);
	}
}

static int
mlx5e_rep_indr_offload(struct net_device *netdev,
		       struct tc_cls_flower_offload *flower,
		       struct mlx5e_rep_indr_block_priv *indr_priv)
{
	struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
	int flags = MLX5E_TC_EGRESS | MLX5E_TC_ESW_OFFLOAD;
	int err = 0;

	switch (flower->command) {
	case TC_CLSFLOWER_REPLACE:
		err = mlx5e_configure_flower(netdev, priv, flower, flags);
		break;
	case TC_CLSFLOWER_DESTROY:
		err = mlx5e_delete_flower(netdev, priv, flower, flags);
		break;
	case TC_CLSFLOWER_STATS:
		err = mlx5e_stats_flower(netdev, priv, flower, flags);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}

static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type,
					 void *type_data, void *indr_priv)
{
	struct mlx5e_rep_indr_block_priv *priv = indr_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_indr_offload(priv->netdev, type_data, priv);
	default:
		return -EOPNOTSUPP;
	}
}

static int
mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
			      struct mlx5e_rep_priv *rpriv,
			      struct tc_block_offload *f)
{
	struct mlx5e_rep_indr_block_priv *indr_priv;
	int err = 0;

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
		if (indr_priv)
			return -EEXIST;

		indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->rpriv = rpriv;
		list_add(&indr_priv->list,
			 &rpriv->uplink_priv.tc_indr_block_priv_list);

		err = tcf_block_cb_register(f->block,
					    mlx5e_rep_indr_setup_block_cb,
					    indr_priv, indr_priv, f->extack);
		if (err) {
			list_del(&indr_priv->list);
			kfree(indr_priv);
		}

		return err;
	case TC_BLOCK_UNBIND:
		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
		if (!indr_priv)
			return -ENOENT;

		tcf_block_cb_unregister(f->block,
					mlx5e_rep_indr_setup_block_cb,
					indr_priv);
		list_del(&indr_priv->list);
		kfree(indr_priv);

		return 0;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int mlx5e_rep_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
				      enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return mlx5e_rep_indr_setup_tc_block(netdev, cb_priv,
						     type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
					 struct net_device *netdev)
{
	int err;

	err = __tc_indr_block_cb_register(netdev, rpriv,
					  mlx5e_rep_indr_setup_tc_cb,
					  rpriv);
	if (err) {
		struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

		mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n",
			      netdev_name(netdev), err);
	}
	return err;
}

static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
					    struct net_device *netdev)
{
	__tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
				      rpriv);
}

static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
					 unsigned long event, void *ptr)
{
	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
						    uplink_priv.netdevice_nb);
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
	    !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
		return NOTIFY_OK;

	switch (event) {
	case NETDEV_REGISTER:
		mlx5e_rep_indr_register_block(rpriv, netdev);
		break;
	case NETDEV_UNREGISTER:
		mlx5e_rep_indr_unregister_block(rpriv, netdev);
		break;
	}
	return NOTIFY_OK;
}

static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
			     struct mlx5e_neigh *m_neigh);

static int mlx5e_rep_netevent_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
						    neigh_update.netevent_nb);
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe = NULL;
	struct mlx5e_neigh m_neigh = {};
	struct neigh_parms *p;
	struct neighbour *n;
	bool found = false;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;
#if IS_ENABLED(CONFIG_IPV6)
		if (n->tbl != &nd_tbl && n->tbl != &arp_tbl)
#else
		if (n->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		m_neigh.dev = n->dev;
		m_neigh.family = n->ops->family;
		memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);

		/* We are in atomic context and can't take RTNL mutex, so use
		 * spin_lock_bh to lookup the neigh table. bh is used since
		 * netevent can be called from a softirq context.
		 */
		spin_lock_bh(&neigh_update->encap_lock);
		nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
		if (!nhe) {
			spin_unlock_bh(&neigh_update->encap_lock);
			return NOTIFY_DONE;
		}

		/* This assignment is valid as long as the neigh reference
		 * is taken
		 */
		nhe->n = n;

		/* Take a reference to ensure the neighbour and mlx5 encap
		 * entry won't be destructed until we drop the reference in
		 * delayed work.
		 */
		neigh_hold(n);
		mlx5e_rep_neigh_entry_hold(nhe);

		if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
			mlx5e_rep_neigh_entry_release(nhe);
			neigh_release(n);
		}
		spin_unlock_bh(&neigh_update->encap_lock);
		break;

	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We check the device is present since we don't care about
		 * changes in the default table, we only care about changes
		 * done per device delay probe time parameter.
		 */
#if IS_ENABLED(CONFIG_IPV6)
		if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl))
#else
		if (!p->dev || p->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use spin_lock_bh to walk the neigh list and look for
		 * the relevant device. bh is used since netevent can be
		 * called from a softirq context.
		 */
		spin_lock_bh(&neigh_update->encap_lock);
		list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
			if (p->dev == nhe->m_neigh.dev) {
				found = true;
				break;
			}
		}
		spin_unlock_bh(&neigh_update->encap_lock);
		if (!found)
			return NOTIFY_DONE;

		neigh_update->min_interval = min_t(unsigned long,
						   NEIGH_VAR(p, DELAY_PROBE_TIME),
						   neigh_update->min_interval);
		mlx5_fc_update_sampling_interval(priv->mdev,
						 neigh_update->min_interval);
		break;
	}
	return NOTIFY_DONE;
}

static const struct rhashtable_params mlx5e_neigh_ht_params = {
	.head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
	.key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
	.key_len = sizeof(struct mlx5e_neigh),
	.automatic_shrinking = true,
};

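/* Set up the per-representor neigh tracking state: the neigh hash table,
 * the delayed work that samples HW counters for neigh "used" updates, and
 * the netevent notifier reacting to neigh updates and DELAY_PROBE_TIME
 * changes.
 */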
static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	int err;

	err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
	if (err)
		return err;

	INIT_LIST_HEAD(&neigh_update->neigh_list);
	spin_lock_init(&neigh_update->encap_lock);
	INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
			  mlx5e_rep_neigh_stats_work);
	mlx5e_rep_neigh_update_init_interval(rpriv);

	rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
	err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
	if (err)
		goto out_err;
	return 0;

out_err:
	rhashtable_destroy(&neigh_update->neigh_ht);
	return err;
}

static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

	unregister_netevent_notifier(&neigh_update->netevent_nb);

	flush_workqueue(priv->wq); /* flush neigh update works */

	cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);

	rhashtable_destroy(&neigh_update->neigh_ht);
}

static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
					struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
				     &nhe->rhash_node,
				     mlx5e_neigh_ht_params);
	if (err)
		return err;

	list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);

	return err;
}

static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv,
					 struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	spin_lock_bh(&rpriv->neigh_update.encap_lock);

	list_del(&nhe->neigh_list);

	rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
			       &nhe->rhash_node,
			       mlx5e_neigh_ht_params);
	spin_unlock_bh(&rpriv->neigh_update.encap_lock);
}

/* This function must only be called under RTNL lock or under the
 * representor's encap_lock in case RTNL mutex can't be held.
 */
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
			     struct mlx5e_neigh *m_neigh)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

	return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
				      mlx5e_neigh_ht_params);
}

static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
					struct mlx5e_encap_entry *e,
					struct mlx5e_neigh_hash_entry **nhe)
{
	int err;

	*nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
	if (!*nhe)
		return -ENOMEM;

	memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
	INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
	INIT_LIST_HEAD(&(*nhe)->encap_list);
	refcount_set(&(*nhe)->refcnt, 1);

	err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
	if (err)
		goto out_free;
	return 0;

out_free:
	kfree(*nhe);
	return err;
}

static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
					  struct mlx5e_neigh_hash_entry *nhe)
{
	/* The neigh hash entry must be removed from the hash table regardless
	 * of the reference count value, so it won't be found by the next
	 * neigh notification call. The neigh hash entry reference count is
	 * incremented only during creation and neigh notification calls and
	 * protects from freeing the nhe struct.
	 */
	mlx5e_rep_neigh_entry_remove(priv, nhe);
	mlx5e_rep_neigh_entry_release(nhe);
}

int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
				 struct mlx5e_encap_entry *e)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
	struct mlx5e_neigh_hash_entry *nhe;
	int err;

	err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
	if (err)
		return err;
	nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
	if (!nhe) {
		err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
		if (err) {
			mlx5_tun_entropy_refcount_dec(tun_entropy,
						      e->reformat_type);
			return err;
		}
	}
	list_add(&e->encap_list, &nhe->encap_list);
	return 0;
}

void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
				  struct mlx5e_encap_entry *e)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
	struct mlx5e_neigh_hash_entry *nhe;

	list_del(&e->encap_list);
	nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);

	if (list_empty(&nhe->encap_list))
		mlx5e_rep_neigh_entry_destroy(priv, nhe);
	mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
}

static int mlx5e_vf_rep_open(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(dev);
	if (err)
		goto unlock;

	if (!mlx5_modify_vport_admin_state(priv->mdev,
					   MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					   rep->vport,
					   MLX5_VPORT_ADMIN_STATE_UP))
		netif_carrier_on(dev);

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}

static int mlx5e_vf_rep_close(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	mutex_lock(&priv->state_lock);
	mlx5_modify_vport_admin_state(priv->mdev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      rep->vport,
				      MLX5_VPORT_ADMIN_STATE_DOWN);
	ret = mlx5e_close_locked(dev);
	mutex_unlock(&priv->state_lock);
	return ret;
}

static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
					char *buf, size_t len)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	unsigned int fn;
	int ret;

	fn = PCI_FUNC(priv->mdev->pdev->devfn);
	if (fn >= MLX5_MAX_PORTS)
		return -EOPNOTSUPP;

	if (rep->vport == MLX5_VPORT_UPLINK)
		ret = snprintf(buf, len, "p%d", fn);
	else
		ret = snprintf(buf, len, "pf%dvf%d", fn, rep->vport - 1);

	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
			      struct tc_cls_flower_offload *cls_flower, int flags)
{
	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case TC_CLSFLOWER_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case TC_CLSFLOWER_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct mlx5e_priv *priv = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS |
						     MLX5E_TC_ESW_OFFLOAD);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_setup_tc_block(struct net_device *dev,
				    struct tc_block_offload *f)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb,
					     priv, priv, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return mlx5e_rep_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;

	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
		return false;

	if (!rpriv) /* non vport rep mlx5e instances don't use this field */
		return false;

	rep = rpriv->rep;
	return (rep->vport == MLX5_VPORT_UPLINK);
}

static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_fold_sw_stats64(priv, stats);
	return 0;
}

static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
					void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlx5e_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static void
mlx5e_vf_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	/* update HW stats in background for next time */
	mlx5e_queue_update_stats(priv);
	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

static int mlx5e_vf_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, NULL);
}

static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
}

static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
{
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	return 0;
}

static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
					__be16 vlan_proto)
{
	netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n");

	if (vlan != 0)
		return -EOPNOTSUPP;

	/* allow setting 0-vid for compatibility with libvirt */
	return 0;
}

static const struct net_device_ops mlx5e_netdev_ops_vf_rep = {
	.ndo_open = mlx5e_vf_rep_open,
	.ndo_stop = mlx5e_vf_rep_close,
	.ndo_start_xmit = mlx5e_xmit,
	.ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
	.ndo_setup_tc = mlx5e_rep_setup_tc,
	.ndo_get_stats64 = mlx5e_vf_rep_get_stats,
	.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu = mlx5e_vf_rep_change_mtu,
	.ndo_get_port_parent_id = mlx5e_rep_get_port_parent_id,
};

static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
	.ndo_open = mlx5e_open,
	.ndo_stop = mlx5e_close,
	.ndo_start_xmit = mlx5e_xmit,
	.ndo_set_mac_address = mlx5e_uplink_rep_set_mac,
	.ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
	.ndo_setup_tc = mlx5e_rep_setup_tc,
	.ndo_get_stats64 = mlx5e_get_stats,
	.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu = mlx5e_uplink_rep_change_mtu,
	.ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
	.ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
	.ndo_features_check = mlx5e_features_check,
	.ndo_set_vf_mac = mlx5e_set_vf_mac,
	.ndo_set_vf_rate = mlx5e_set_vf_rate,
	.ndo_get_vf_config = mlx5e_get_vf_config,
	.ndo_get_vf_stats = mlx5e_get_vf_stats,
	.ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan,
	.ndo_get_port_parent_id = mlx5e_rep_get_port_parent_id,
};

bool mlx5e_eswitch_rep(struct net_device *netdev)
{
	if (netdev->netdev_ops == &mlx5e_netdev_ops_vf_rep ||
	    netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep)
		return true;

	return false;
}

static void mlx5e_build_rep_params(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_params *params;

	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
		MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
		MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	params = &priv->channels.params;
	params->hard_mtu = MLX5E_ETH_HARD_MTU;
	params->sw_mtu = netdev->mtu;

	/* SQ */
	if (rep->vport == MLX5_VPORT_UPLINK)
		params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	else
		params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	/* CQ moderation params */
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->tunneled_offload_en = false;

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);

	/* RSS */
	mlx5e_build_rss_params(&priv->rss_params, params->num_channels);
}

static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;

	if (rep->vport == MLX5_VPORT_UPLINK) {
		SET_NETDEV_DEV(netdev, mdev->device);
		netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
		/* we want a persistent mac for the uplink rep */
		mlx5_query_nic_vport_mac_address(mdev, 0, netdev->dev_addr);
		netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
		if (MLX5_CAP_GEN(mdev, qos))
			netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
	} else {
		netdev->netdev_ops = &mlx5e_netdev_ops_vf_rep;
		eth_hw_addr_random(netdev);
		netdev->ethtool_ops = &mlx5e_vf_rep_ethtool_ops;
	}

	netdev->watchdog_timeo = 15 * HZ;

	netdev->features |= NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
	netdev->hw_features |= NETIF_F_HW_TC;

	netdev->hw_features |= NETIF_F_SG;
	netdev->hw_features |= NETIF_F_IP_CSUM;
	netdev->hw_features |= NETIF_F_IPV6_CSUM;
	netdev->hw_features |= NETIF_F_GRO;
	netdev->hw_features |= NETIF_F_TSO;
	netdev->hw_features |= NETIF_F_TSO6;
	netdev->hw_features |= NETIF_F_RXCSUM;

	if (rep->vport != MLX5_VPORT_UPLINK)
		netdev->features |= NETIF_F_VLAN_CHALLENGED;

	netdev->features |= netdev->hw_features;
}

static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
			  struct net_device *netdev,
			  const struct mlx5e_profile *profile,
			  void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
	if (err)
		return err;

	priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;

	mlx5e_build_rep_params(netdev);
	mlx5e_build_rep_netdev(netdev);

	mlx5e_timestamp_init(priv);

	return 0;
}

static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
	mlx5e_netdev_cleanup(priv->netdev, priv);
}

static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
	struct ttc_params ttc_params = {};
	int tt, err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	/* The inner_ttc in the ttc params is intentionally not set */
	ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
	mlx5e_set_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

	err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err);
		return err;
	}
	return 0;
}

static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = priv->direct_tir[0].tirn;
	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
						      rep->vport,
						      &dest);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);
	rpriv->vport_rx_rule = flow_rule;

	return 0;
}

static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	mlx5e_init_l2_addr(priv);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		return err;
	}

	err = mlx5e_create_indirect_rqt(priv);
	if (err)
		goto err_close_drop_rq;

	err = mlx5e_create_direct_rqts(priv);
	if (err)
		goto err_destroy_indirect_rqts;

	err = mlx5e_create_indirect_tirs(priv, false);
	if (err)
		goto err_destroy_direct_rqts;

	err = mlx5e_create_direct_tirs(priv);
	if (err)
		goto err_destroy_indirect_tirs;

	err = mlx5e_create_rep_ttc_table(priv);
	if (err)
		goto err_destroy_direct_tirs;

	err = mlx5e_create_rep_vport_rx_rule(priv);
	if (err)
		goto err_destroy_ttc_table;

	return 0;

err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv, false);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
	return err;
}

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5_del_flow_rules(rpriv->vport_rx_rule);
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
	mlx5e_destroy_direct_tirs(priv);
	mlx5e_destroy_indirect_tirs(priv, false);
	mlx5e_destroy_direct_rqts(priv);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	mlx5e_close_drop_rq(&priv->drop_rq);
}

static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv;
	int tc, err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
		uplink_priv = &rpriv->uplink_priv;

		INIT_LIST_HEAD(&uplink_priv->unready_flows);

		/* init shared tc flow table */
		err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
		if (err)
			goto destroy_tises;

		mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);

		/* init indirect block notifications */
		INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
		uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
		err = register_netdevice_notifier(&uplink_priv->netdevice_nb);
		if (err) {
			mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
			goto tc_esw_cleanup;
		}
	}

	return 0;

tc_esw_cleanup:
	mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
destroy_tises:
	for (tc = 0; tc < priv->profile->max_tc; tc++)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
	return err;
}

static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
		/* clean indirect TC block notifications */
		unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb);
		mlx5e_rep_indr_clean_block_privs(rpriv);

		/* delete shared tc flow table */
		mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
	}
}

static void mlx5e_vf_rep_enable(struct mlx5e_priv *priv)
{
	mlx5e_set_netdev_mtu_boundaries(priv);
}

static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);

	if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
		struct mlx5_eqe *eqe = data;

		switch (eqe->sub_type) {
		case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			queue_work(priv->wq, &priv->update_carrier_work);
			break;
		default:
			return NOTIFY_DONE;
		}

		return NOTIFY_OK;
	}

	if (event == MLX5_DEV_EVENT_PORT_AFFINITY) {
		struct mlx5e_rep_priv *rpriv = priv->ppriv;

		queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work);

		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	u16 max_mtu;

	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
	mlx5e_set_dev_port_mtu(priv);

	INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
		  mlx5e_tc_reoffload_flows_work);

	mlx5_lag_add(mdev, netdev);
	priv->events_nb.notifier_call = uplink_rep_async_event;
	mlx5_notifier_register(mdev, &priv->events_nb);
#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_initialize(priv);
	mlx5e_dcbnl_init_app(priv);
#endif
}

static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_delete_app(priv);
#endif
	mlx5_notifier_unregister(mdev, &priv->events_nb);
	cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
	mlx5_lag_remove(mdev);
}

static const struct mlx5e_profile mlx5e_vf_rep_profile = {
	.init = mlx5e_init_rep,
	.cleanup = mlx5e_cleanup_rep,
	.init_rx = mlx5e_init_rep_rx,
	.cleanup_rx = mlx5e_cleanup_rep_rx,
	.init_tx = mlx5e_init_rep_tx,
	.cleanup_tx = mlx5e_cleanup_rep_tx,
	.enable = mlx5e_vf_rep_enable,
	.update_stats = mlx5e_vf_rep_update_hw_counters,
	.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
	.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
};

static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
	.init = mlx5e_init_rep,
	.cleanup = mlx5e_cleanup_rep,
	.init_rx = mlx5e_init_rep_rx,
	.cleanup_rx = mlx5e_cleanup_rep_rx,
	.init_tx = mlx5e_init_rep_tx,
	.cleanup_tx = mlx5e_cleanup_rep_tx,
	.enable = mlx5e_uplink_rep_enable,
	.disable = mlx5e_uplink_rep_disable,
	.update_stats = mlx5e_uplink_rep_update_hw_counters,
	.update_carrier = mlx5e_update_carrier,
	.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
	.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
	.max_tc = MLX5E_MAX_NUM_TC,
};

/* e-Switch vport representors */

static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	const struct mlx5e_profile *profile;
	struct mlx5e_rep_priv *rpriv;
	struct net_device *netdev;
	int nch, err;

	rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
	if (!rpriv)
		return -ENOMEM;

	/* rpriv->rep to be looked up when profile->init() is called */
	rpriv->rep = rep;

	nch = mlx5e_get_max_num_channels(dev);
	profile = (rep->vport == MLX5_VPORT_UPLINK) ? &mlx5e_uplink_rep_profile : &mlx5e_vf_rep_profile;
	netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
	if (!netdev) {
		pr_warn("Failed to create representor netdev for vport %d\n",
			rep->vport);
		kfree(rpriv);
		return -EINVAL;
	}

	rpriv->netdev = netdev;
	rep->rep_if[REP_ETH].priv = rpriv;
	INIT_LIST_HEAD(&rpriv->vport_sqs_list);

	if (rep->vport == MLX5_VPORT_UPLINK) {
		err = mlx5e_create_mdev_resources(dev);
		if (err)
			goto err_destroy_netdev;
	}

	err = mlx5e_attach_netdev(netdev_priv(netdev));
	if (err) {
		pr_warn("Failed to attach representor netdev for vport %d\n",
			rep->vport);
		goto err_destroy_mdev_resources;
	}

	err = mlx5e_rep_neigh_init(rpriv);
	if (err) {
		pr_warn("Failed to initialize neighbours handling for vport %d\n",
			rep->vport);
		goto err_detach_netdev;
	}

	err = register_netdev(netdev);
	if (err) {
		pr_warn("Failed to register representor netdev for vport %d\n",
			rep->vport);
		goto err_neigh_cleanup;
	}

	return 0;

err_neigh_cleanup:
	mlx5e_rep_neigh_cleanup(rpriv);

err_detach_netdev:
	mlx5e_detach_netdev(netdev_priv(netdev));

err_destroy_mdev_resources:
	if (rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_destroy_mdev_resources(dev);

err_destroy_netdev:
	mlx5e_destroy_netdev(netdev_priv(netdev));
	kfree(rpriv);
	return err;
}

static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	void *ppriv = priv->ppriv;

	unregister_netdev(netdev);
	mlx5e_rep_neigh_cleanup(rpriv);
	mlx5e_detach_netdev(priv);
	if (rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_destroy_mdev_resources(priv->mdev);
	mlx5e_destroy_netdev(priv);
	kfree(ppriv); /* mlx5e_rep_priv */
}

static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;

	rpriv = mlx5e_rep_to_rep_priv(rep);

	return rpriv->netdev;
}

void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5_eswitch_rep_if rep_if = {};

	rep_if.load = mlx5e_vport_rep_load;
	rep_if.unload = mlx5e_vport_rep_unload;
	rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;

	mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_ETH);
}

void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
}