2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <generated/utsrelease.h>
34 #include <linux/mlx5/fs.h>
35 #include <net/switchdev.h>
36 #include <net/pkt_cls.h>
37 #include <net/act_api.h>
38 #include <net/netevent.h>
40 #include <net/devlink.h>
41 #include <net/ipv6_stubs.h>
44 #include "eswitch_offloads_chains.h"
48 #include "en/tc_tun.h"
50 #include "lib/port_tun.h"
52 #define CREATE_TRACE_POINTS
53 #include "diag/en_rep_tracepoint.h"
55 #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
56 max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
57 #define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
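/* Representor netdevs are sized conservatively by default: a single channel
 * and a small SQ (2^7 = 128 entries, unless the device minimum is larger),
 * presumably because slow-path traffic through a representor is expected to
 * be light once flows are offloaded to the eswitch.
 */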
59 static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
61 struct mlx5e_rep_indr_block_priv {
62 struct net_device *netdev;
63 struct mlx5e_rep_priv *rpriv;
65 struct list_head list;
68 static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
69 struct net_device *netdev);
71 static void mlx5e_rep_get_drvinfo(struct net_device *dev,
72 struct ethtool_drvinfo *drvinfo)
74 struct mlx5e_priv *priv = netdev_priv(dev);
75 struct mlx5_core_dev *mdev = priv->mdev;
77 strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
78 sizeof(drvinfo->driver));
79 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
80 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
82 fw_rev_maj(mdev), fw_rev_min(mdev),
83 fw_rev_sub(mdev), mdev->board_id);
86 static void mlx5e_uplink_rep_get_drvinfo(struct net_device *dev,
87 struct ethtool_drvinfo *drvinfo)
89 struct mlx5e_priv *priv = netdev_priv(dev);
91 mlx5e_rep_get_drvinfo(dev, drvinfo);
92 strlcpy(drvinfo->bus_info, pci_name(priv->mdev->pdev),
93 sizeof(drvinfo->bus_info));
96 static const struct counter_desc sw_rep_stats_desc[] = {
97 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
98 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
99 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
100 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
104 u64 vport_rx_packets;
105 u64 vport_tx_packets;
110 static const struct counter_desc vport_rep_stats_desc[] = {
111 { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
112 { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
113 { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
114 { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
117 #define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
118 #define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
120 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
122 return NUM_VPORT_REP_SW_COUNTERS;
125 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
129 for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
130 strcpy(data + (idx++) * ETH_GSTRING_LEN,
131 sw_rep_stats_desc[i].format);
135 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
139 for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
140 data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
141 sw_rep_stats_desc, i);
145 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
147 struct mlx5e_sw_stats *s = &priv->stats.sw;
148 struct rtnl_link_stats64 stats64 = {};
150 memset(s, 0, sizeof(*s));
151 mlx5e_fold_sw_stats64(priv, &stats64);
153 s->rx_packets = stats64.rx_packets;
154 s->rx_bytes = stats64.rx_bytes;
155 s->tx_packets = stats64.tx_packets;
156 s->tx_bytes = stats64.tx_bytes;
157 s->tx_queue_dropped = stats64.tx_dropped;
160 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
162 return NUM_VPORT_REP_HW_COUNTERS;
165 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
169 for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
170 strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
174 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
178 for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
179 data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
180 vport_rep_stats_desc, i);
184 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
186 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
187 struct mlx5e_rep_priv *rpriv = priv->ppriv;
188 struct mlx5_eswitch_rep *rep = rpriv->rep;
189 struct rtnl_link_stats64 *vport_stats;
190 struct ifla_vf_stats vf_stats;
193 err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
195 netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
200 vport_stats = &priv->stats.vf_vport;
201 /* flip tx/rx as we are reporting the counters for the switch vport */
202 vport_stats->rx_packets = vf_stats.tx_packets;
203 vport_stats->rx_bytes = vf_stats.tx_bytes;
204 vport_stats->tx_packets = vf_stats.rx_packets;
205 vport_stats->tx_bytes = vf_stats.rx_bytes;
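/* Example of the flip above: a packet transmitted by the VF is seen as
 * received on the switch-side vport, so it is reported here as an rx packet
 * of the representor, and vice versa.
 */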
208 static void mlx5e_rep_get_strings(struct net_device *dev,
209 u32 stringset, uint8_t *data)
211 struct mlx5e_priv *priv = netdev_priv(dev);
215 mlx5e_stats_fill_strings(priv, data);
220 static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
221 struct ethtool_stats *stats, u64 *data)
223 struct mlx5e_priv *priv = netdev_priv(dev);
225 mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
228 static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
230 struct mlx5e_priv *priv = netdev_priv(dev);
234 return mlx5e_stats_total_num(priv);
240 static void mlx5e_rep_get_ringparam(struct net_device *dev,
241 struct ethtool_ringparam *param)
243 struct mlx5e_priv *priv = netdev_priv(dev);
245 mlx5e_ethtool_get_ringparam(priv, param);
248 static int mlx5e_rep_set_ringparam(struct net_device *dev,
249 struct ethtool_ringparam *param)
251 struct mlx5e_priv *priv = netdev_priv(dev);
253 return mlx5e_ethtool_set_ringparam(priv, param);
256 static void mlx5e_rep_get_channels(struct net_device *dev,
257 struct ethtool_channels *ch)
259 struct mlx5e_priv *priv = netdev_priv(dev);
261 mlx5e_ethtool_get_channels(priv, ch);
264 static int mlx5e_rep_set_channels(struct net_device *dev,
265 struct ethtool_channels *ch)
267 struct mlx5e_priv *priv = netdev_priv(dev);
269 return mlx5e_ethtool_set_channels(priv, ch);
272 static int mlx5e_rep_get_coalesce(struct net_device *netdev,
273 struct ethtool_coalesce *coal)
275 struct mlx5e_priv *priv = netdev_priv(netdev);
277 return mlx5e_ethtool_get_coalesce(priv, coal);
280 static int mlx5e_rep_set_coalesce(struct net_device *netdev,
281 struct ethtool_coalesce *coal)
283 struct mlx5e_priv *priv = netdev_priv(netdev);
285 return mlx5e_ethtool_set_coalesce(priv, coal);
288 static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
290 struct mlx5e_priv *priv = netdev_priv(netdev);
292 return mlx5e_ethtool_get_rxfh_key_size(priv);
295 static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
297 struct mlx5e_priv *priv = netdev_priv(netdev);
299 return mlx5e_ethtool_get_rxfh_indir_size(priv);
302 static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
303 struct ethtool_pauseparam *pauseparam)
305 struct mlx5e_priv *priv = netdev_priv(netdev);
307 mlx5e_ethtool_get_pauseparam(priv, pauseparam);
310 static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev,
311 struct ethtool_pauseparam *pauseparam)
313 struct mlx5e_priv *priv = netdev_priv(netdev);
315 return mlx5e_ethtool_set_pauseparam(priv, pauseparam);
318 static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev,
319 struct ethtool_link_ksettings *link_ksettings)
321 struct mlx5e_priv *priv = netdev_priv(netdev);
323 return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
326 static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
327 const struct ethtool_link_ksettings *link_ksettings)
329 struct mlx5e_priv *priv = netdev_priv(netdev);
331 return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
334 static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
335 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
336 ETHTOOL_COALESCE_MAX_FRAMES |
337 ETHTOOL_COALESCE_USE_ADAPTIVE,
338 .get_drvinfo = mlx5e_rep_get_drvinfo,
339 .get_link = ethtool_op_get_link,
340 .get_strings = mlx5e_rep_get_strings,
341 .get_sset_count = mlx5e_rep_get_sset_count,
342 .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
343 .get_ringparam = mlx5e_rep_get_ringparam,
344 .set_ringparam = mlx5e_rep_set_ringparam,
345 .get_channels = mlx5e_rep_get_channels,
346 .set_channels = mlx5e_rep_set_channels,
347 .get_coalesce = mlx5e_rep_get_coalesce,
348 .set_coalesce = mlx5e_rep_set_coalesce,
349 .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
350 .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
353 static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
354 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
355 ETHTOOL_COALESCE_MAX_FRAMES |
356 ETHTOOL_COALESCE_USE_ADAPTIVE,
357 .get_drvinfo = mlx5e_uplink_rep_get_drvinfo,
358 .get_link = ethtool_op_get_link,
359 .get_strings = mlx5e_rep_get_strings,
360 .get_sset_count = mlx5e_rep_get_sset_count,
361 .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
362 .get_ringparam = mlx5e_rep_get_ringparam,
363 .set_ringparam = mlx5e_rep_set_ringparam,
364 .get_channels = mlx5e_rep_get_channels,
365 .set_channels = mlx5e_rep_set_channels,
366 .get_coalesce = mlx5e_rep_get_coalesce,
367 .set_coalesce = mlx5e_rep_set_coalesce,
368 .get_link_ksettings = mlx5e_uplink_rep_get_link_ksettings,
369 .set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
370 .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
371 .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
372 .get_rxfh = mlx5e_get_rxfh,
373 .set_rxfh = mlx5e_set_rxfh,
374 .get_rxnfc = mlx5e_get_rxnfc,
375 .set_rxnfc = mlx5e_set_rxnfc,
376 .get_pauseparam = mlx5e_uplink_rep_get_pauseparam,
377 .set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
380 static void mlx5e_rep_get_port_parent_id(struct net_device *dev,
381 struct netdev_phys_item_id *ppid)
383 struct mlx5e_priv *priv;
386 priv = netdev_priv(dev);
388 parent_id = mlx5_query_nic_system_image_guid(priv->mdev);
389 ppid->id_len = sizeof(parent_id);
390 memcpy(ppid->id, &parent_id, sizeof(parent_id));
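/* Note: the parent ID used above is the NIC's system image GUID, so all
 * representors that belong to the same eswitch report the same
 * phys_switch_id to the stack.
 */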
393 static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
394 struct mlx5_eswitch_rep *rep)
396 struct mlx5e_rep_sq *rep_sq, *tmp;
397 struct mlx5e_rep_priv *rpriv;
399 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
402 rpriv = mlx5e_rep_to_rep_priv(rep);
403 list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
404 mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
405 list_del(&rep_sq->list);
410 static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
411 struct mlx5_eswitch_rep *rep,
412 u32 *sqns_array, int sqns_num)
414 struct mlx5_flow_handle *flow_rule;
415 struct mlx5e_rep_priv *rpriv;
416 struct mlx5e_rep_sq *rep_sq;
420 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
423 rpriv = mlx5e_rep_to_rep_priv(rep);
424 for (i = 0; i < sqns_num; i++) {
425 rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
431 /* Add re-inject rule to the PF/representor sqs */
432 flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
435 if (IS_ERR(flow_rule)) {
436 err = PTR_ERR(flow_rule);
440 rep_sq->send_to_vport_rule = flow_rule;
441 list_add(&rep_sq->list, &rpriv->vport_sqs_list);
446 mlx5e_sqs2vport_stop(esw, rep);
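/* mlx5e_add_sqs_fwd_rules() below gathers the SQ numbers of every channel/TC
 * SQ of the representor and installs a send-to-vport rule for each via
 * mlx5e_sqs2vport_start(), so traffic sent on the representor is steered to
 * the vport it represents.
 */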
450 int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
452 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
453 struct mlx5e_rep_priv *rpriv = priv->ppriv;
454 struct mlx5_eswitch_rep *rep = rpriv->rep;
455 struct mlx5e_channel *c;
456 int n, tc, num_sqs = 0;
460 sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
464 for (n = 0; n < priv->channels.num; n++) {
465 c = priv->channels.c[n];
466 for (tc = 0; tc < c->num_tc; tc++)
467 sqs[num_sqs++] = c->sq[tc].sqn;
470 err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
475 netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
479 void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
481 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
482 struct mlx5e_rep_priv *rpriv = priv->ppriv;
483 struct mlx5_eswitch_rep *rep = rpriv->rep;
485 mlx5e_sqs2vport_stop(esw, rep);
488 static unsigned long mlx5e_rep_ipv6_interval(void)
490 if (IS_ENABLED(CONFIG_IPV6) && ipv6_stub->nd_tbl)
491 return NEIGH_VAR(&ipv6_stub->nd_tbl->parms, DELAY_PROBE_TIME);
496 static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
498 unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
499 unsigned long ipv6_interval = mlx5e_rep_ipv6_interval();
500 struct net_device *netdev = rpriv->netdev;
501 struct mlx5e_priv *priv = netdev_priv(netdev);
503 rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
504 mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
507 void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
509 struct mlx5e_rep_priv *rpriv = priv->ppriv;
510 struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
512 mlx5_fc_queue_stats_work(priv->mdev,
513 &neigh_update->neigh_stats_work,
514 neigh_update->min_interval);
517 static bool mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
519 return refcount_inc_not_zero(&nhe->refcnt);
522 static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe);
524 static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
526 if (refcount_dec_and_test(&nhe->refcnt)) {
527 mlx5e_rep_neigh_entry_remove(nhe);
532 static struct mlx5e_neigh_hash_entry *
533 mlx5e_get_next_nhe(struct mlx5e_rep_priv *rpriv,
534 struct mlx5e_neigh_hash_entry *nhe)
536 struct mlx5e_neigh_hash_entry *next = NULL;
541 list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
543 struct mlx5e_neigh_hash_entry,
545 list_first_or_null_rcu(&rpriv->neigh_update.neigh_list,
546 struct mlx5e_neigh_hash_entry,
549 next = list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
551 struct mlx5e_neigh_hash_entry,
553 if (mlx5e_rep_neigh_entry_hold(next))
559 mlx5e_rep_neigh_entry_release(nhe);
564 static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
566 struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
567 neigh_update.neigh_stats_work.work);
568 struct net_device *netdev = rpriv->netdev;
569 struct mlx5e_priv *priv = netdev_priv(netdev);
570 struct mlx5e_neigh_hash_entry *nhe = NULL;
573 if (!list_empty(&rpriv->neigh_update.neigh_list))
574 mlx5e_rep_queue_neigh_stats_work(priv);
576 while ((nhe = mlx5e_get_next_nhe(rpriv, nhe)) != NULL)
577 mlx5e_tc_update_neigh_used_value(nhe);
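/* The loop above iterates with mlx5e_get_next_nhe(), which takes a reference
 * on the next entry before releasing the previous one, so entries may be
 * added or removed concurrently while the work runs. The work re-queues
 * itself (above) as long as the list is non-empty.
 */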
582 static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
583 struct mlx5e_encap_entry *e,
584 bool neigh_connected,
585 unsigned char ha[ETH_ALEN])
587 struct ethhdr *eth = (struct ethhdr *)e->encap_header;
588 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
589 bool encap_connected;
590 LIST_HEAD(flow_list);
594 /* wait for encap to be fully initialized */
595 wait_for_completion(&e->res_ready);
597 mutex_lock(&esw->offloads.encap_tbl_lock);
598 encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
599 if (e->compl_result < 0 || (encap_connected == neigh_connected &&
600 ether_addr_equal(e->h_dest, ha)))
603 mlx5e_take_all_encap_flows(e, &flow_list);
605 if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
606 (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
607 mlx5e_tc_encap_flows_del(priv, e, &flow_list);
609 if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
610 ether_addr_copy(e->h_dest, ha);
611 ether_addr_copy(eth->h_dest, ha);
612 /* Update the encap source mac, in case we delete
613 * the flows when the encap source mac changes.
615 ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
617 mlx5e_tc_encap_flows_add(priv, e, &flow_list);
620 mutex_unlock(&esw->offloads.encap_tbl_lock);
621 mlx5e_put_encap_flow_list(priv, &flow_list);
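/* In short: if the encap entry is offloaded but the neighbour became invalid
 * or its MAC changed, the encap flows are removed; if the neighbour became
 * valid while the entry is not offloaded, the cached headers are refreshed
 * and the flows are offloaded again.
 */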
624 static void mlx5e_rep_neigh_update(struct work_struct *work)
626 struct mlx5e_neigh_hash_entry *nhe =
627 container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
628 struct neighbour *n = nhe->n;
629 struct mlx5e_encap_entry *e;
630 unsigned char ha[ETH_ALEN];
631 struct mlx5e_priv *priv;
632 bool neigh_connected;
637 /* If these parameters are changed after we release the lock,
638 * we'll receive another event letting us know about it.
639 * We use this lock to avoid inconsistency between the neigh validity
640 * and its hw address.
642 read_lock_bh(&n->lock);
643 memcpy(ha, n->ha, ETH_ALEN);
644 nud_state = n->nud_state;
646 read_unlock_bh(&n->lock);
648 neigh_connected = (nud_state & NUD_VALID) && !dead;
650 trace_mlx5e_rep_neigh_update(nhe, ha, neigh_connected);
652 list_for_each_entry(e, &nhe->encap_list, encap_list) {
653 if (!mlx5e_encap_take(e))
656 priv = netdev_priv(e->out_dev);
657 mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
658 mlx5e_encap_put(priv, e);
660 mlx5e_rep_neigh_entry_release(nhe);
665 static struct mlx5e_rep_indr_block_priv *
666 mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
667 struct net_device *netdev)
669 struct mlx5e_rep_indr_block_priv *cb_priv;
671 /* All callback list access should be protected by RTNL. */
674 list_for_each_entry(cb_priv,
675 &rpriv->uplink_priv.tc_indr_block_priv_list,
677 if (cb_priv->netdev == netdev)
683 static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
685 struct mlx5e_rep_indr_block_priv *cb_priv, *temp;
686 struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;
688 list_for_each_entry_safe(cb_priv, temp, head, list) {
689 mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev);
695 mlx5e_rep_indr_offload(struct net_device *netdev,
696 struct flow_cls_offload *flower,
697 struct mlx5e_rep_indr_block_priv *indr_priv)
699 unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
700 struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
703 switch (flower->command) {
704 case FLOW_CLS_REPLACE:
705 err = mlx5e_configure_flower(netdev, priv, flower, flags);
707 case FLOW_CLS_DESTROY:
708 err = mlx5e_delete_flower(netdev, priv, flower, flags);
711 err = mlx5e_stats_flower(netdev, priv, flower, flags);
720 static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type,
721 void *type_data, void *indr_priv)
723 struct mlx5e_rep_indr_block_priv *priv = indr_priv;
726 case TC_SETUP_CLSFLOWER:
727 return mlx5e_rep_indr_offload(priv->netdev, type_data, priv);
733 static void mlx5e_rep_indr_tc_block_unbind(void *cb_priv)
735 struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;
737 list_del(&indr_priv->list);
741 static LIST_HEAD(mlx5e_block_cb_list);
744 mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
745 struct mlx5e_rep_priv *rpriv,
746 struct flow_block_offload *f)
748 struct mlx5e_rep_indr_block_priv *indr_priv;
749 struct flow_block_cb *block_cb;
751 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
754 f->unlocked_driver_cb = true;
755 f->driver_block_list = &mlx5e_block_cb_list;
757 switch (f->command) {
758 case FLOW_BLOCK_BIND:
759 indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
763 indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
767 indr_priv->netdev = netdev;
768 indr_priv->rpriv = rpriv;
769 list_add(&indr_priv->list,
770 &rpriv->uplink_priv.tc_indr_block_priv_list);
772 block_cb = flow_block_cb_alloc(mlx5e_rep_indr_setup_block_cb,
773 indr_priv, indr_priv,
774 mlx5e_rep_indr_tc_block_unbind);
775 if (IS_ERR(block_cb)) {
776 list_del(&indr_priv->list);
778 return PTR_ERR(block_cb);
780 flow_block_cb_add(block_cb, f);
781 list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);
784 case FLOW_BLOCK_UNBIND:
785 indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
789 block_cb = flow_block_cb_lookup(f->block,
790 mlx5e_rep_indr_setup_block_cb,
795 flow_block_cb_remove(block_cb, f);
796 list_del(&block_cb->driver_list);
805 int mlx5e_rep_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
806 enum tc_setup_type type, void *type_data)
810 return mlx5e_rep_indr_setup_tc_block(netdev, cb_priv,
817 static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
818 struct net_device *netdev)
822 err = __flow_indr_block_cb_register(netdev, rpriv,
823 mlx5e_rep_indr_setup_tc_cb,
826 struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
828 mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n",
829 netdev_name(netdev), err);
834 static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
835 struct net_device *netdev)
837 __flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
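/* The indirect block (un)registration above lets the uplink representor
 * attach TC block callbacks to tunnel devices (and VLAN devices stacked on
 * the uplink) that it does not own, so filters installed on those devices
 * can still be offloaded to the eswitch.
 */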
841 static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
842 unsigned long event, void *ptr)
844 struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
845 uplink_priv.netdevice_nb);
846 struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
847 struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
849 if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
850 !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
854 case NETDEV_REGISTER:
855 mlx5e_rep_indr_register_block(rpriv, netdev);
857 case NETDEV_UNREGISTER:
858 mlx5e_rep_indr_unregister_block(rpriv, netdev);
865 mlx5e_rep_queue_neigh_update_work(struct mlx5e_priv *priv,
866 struct mlx5e_neigh_hash_entry *nhe,
869 /* Take a reference to ensure the neighbour and mlx5 encap
870 * entry won't be destroyed until we drop the reference in
875 /* This assignment is valid as long as the neigh reference
880 if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
881 mlx5e_rep_neigh_entry_release(nhe);
886 static struct mlx5e_neigh_hash_entry *
887 mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
888 struct mlx5e_neigh *m_neigh);
890 static int mlx5e_rep_netevent_event(struct notifier_block *nb,
891 unsigned long event, void *ptr)
893 struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
894 neigh_update.netevent_nb);
895 struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
896 struct net_device *netdev = rpriv->netdev;
897 struct mlx5e_priv *priv = netdev_priv(netdev);
898 struct mlx5e_neigh_hash_entry *nhe = NULL;
899 struct mlx5e_neigh m_neigh = {};
900 struct neigh_parms *p;
905 case NETEVENT_NEIGH_UPDATE:
907 #if IS_ENABLED(CONFIG_IPV6)
908 if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
910 if (n->tbl != &arp_tbl)
914 m_neigh.dev = n->dev;
915 m_neigh.family = n->ops->family;
916 memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
919 nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
924 mlx5e_rep_queue_neigh_update_work(priv, nhe, n);
927 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
930 /* We check that the device is present since we don't care about
931 * changes in the default table, we only care about changes
932 * made to the per-device delay probe time parameter.
934 #if IS_ENABLED(CONFIG_IPV6)
935 if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
937 if (!p->dev || p->tbl != &arp_tbl)
942 list_for_each_entry_rcu(nhe, &neigh_update->neigh_list,
944 if (p->dev == nhe->m_neigh.dev) {
953 neigh_update->min_interval = min_t(unsigned long,
954 NEIGH_VAR(p, DELAY_PROBE_TIME),
955 neigh_update->min_interval);
956 mlx5_fc_update_sampling_interval(priv->mdev,
957 neigh_update->min_interval);
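/* Tracking the smallest DELAY_PROBE_TIME across relevant devices and feeding
 * it to mlx5_fc_update_sampling_interval() is apparently meant to keep the
 * flow-counter sampling (and thus the neighbour 'used' updates) at least as
 * frequent as the stack's neighbour probing.
 */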
963 static const struct rhashtable_params mlx5e_neigh_ht_params = {
964 .head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
965 .key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
966 .key_len = sizeof(struct mlx5e_neigh),
967 .automatic_shrinking = true,
970 static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
972 struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
975 err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
979 INIT_LIST_HEAD(&neigh_update->neigh_list);
980 mutex_init(&neigh_update->encap_lock);
981 INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
982 mlx5e_rep_neigh_stats_work);
983 mlx5e_rep_neigh_update_init_interval(rpriv);
985 rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
986 err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
992 rhashtable_destroy(&neigh_update->neigh_ht);
996 static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
998 struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
999 struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
1001 unregister_netevent_notifier(&neigh_update->netevent_nb);
1003 flush_workqueue(priv->wq); /* flush neigh update works */
1005 cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);
1007 mutex_destroy(&neigh_update->encap_lock);
1008 rhashtable_destroy(&neigh_update->neigh_ht);
1011 static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
1012 struct mlx5e_neigh_hash_entry *nhe)
1014 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1017 err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
1019 mlx5e_neigh_ht_params);
1023 list_add_rcu(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);
1028 static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe)
1030 struct mlx5e_rep_priv *rpriv = nhe->priv->ppriv;
1032 mutex_lock(&rpriv->neigh_update.encap_lock);
1034 list_del_rcu(&nhe->neigh_list);
1036 rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
1038 mlx5e_neigh_ht_params);
1039 mutex_unlock(&rpriv->neigh_update.encap_lock);
1042 /* This function must only be called under the representor's encap_lock or
1043 * inside an rcu read-side critical section.
1045 static struct mlx5e_neigh_hash_entry *
1046 mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
1047 struct mlx5e_neigh *m_neigh)
1049 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1050 struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
1051 struct mlx5e_neigh_hash_entry *nhe;
1053 nhe = rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
1054 mlx5e_neigh_ht_params);
1055 return nhe && mlx5e_rep_neigh_entry_hold(nhe) ? nhe : NULL;
1058 static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
1059 struct mlx5e_encap_entry *e,
1060 struct mlx5e_neigh_hash_entry **nhe)
1064 *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
1068 (*nhe)->priv = priv;
1069 memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
1070 INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
1071 spin_lock_init(&(*nhe)->encap_list_lock);
1072 INIT_LIST_HEAD(&(*nhe)->encap_list);
1073 refcount_set(&(*nhe)->refcnt, 1);
1075 err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
1085 int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
1086 struct mlx5e_encap_entry *e)
1088 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1089 struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
1090 struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
1091 struct mlx5e_neigh_hash_entry *nhe;
1094 err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
1098 mutex_lock(&rpriv->neigh_update.encap_lock);
1099 nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
1101 err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
1103 mutex_unlock(&rpriv->neigh_update.encap_lock);
1104 mlx5_tun_entropy_refcount_dec(tun_entropy,
1111 spin_lock(&nhe->encap_list_lock);
1112 list_add_rcu(&e->encap_list, &nhe->encap_list);
1113 spin_unlock(&nhe->encap_list_lock);
1115 mutex_unlock(&rpriv->neigh_update.encap_lock);
1120 void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
1121 struct mlx5e_encap_entry *e)
1123 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1124 struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
1125 struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
1130 spin_lock(&e->nhe->encap_list_lock);
1131 list_del_rcu(&e->encap_list);
1132 spin_unlock(&e->nhe->encap_list_lock);
1134 mlx5e_rep_neigh_entry_release(e->nhe);
1136 mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
1139 static int mlx5e_rep_open(struct net_device *dev)
1141 struct mlx5e_priv *priv = netdev_priv(dev);
1142 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1143 struct mlx5_eswitch_rep *rep = rpriv->rep;
1146 mutex_lock(&priv->state_lock);
1147 err = mlx5e_open_locked(dev);
1151 if (!mlx5_modify_vport_admin_state(priv->mdev,
1152 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
1154 MLX5_VPORT_ADMIN_STATE_UP))
1155 netif_carrier_on(dev);
1158 mutex_unlock(&priv->state_lock);
1162 static int mlx5e_rep_close(struct net_device *dev)
1164 struct mlx5e_priv *priv = netdev_priv(dev);
1165 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1166 struct mlx5_eswitch_rep *rep = rpriv->rep;
1169 mutex_lock(&priv->state_lock);
1170 mlx5_modify_vport_admin_state(priv->mdev,
1171 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
1173 MLX5_VPORT_ADMIN_STATE_DOWN);
1174 ret = mlx5e_close_locked(dev);
1175 mutex_unlock(&priv->state_lock);
1180 mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
1181 struct flow_cls_offload *cls_flower, int flags)
1183 switch (cls_flower->command) {
1184 case FLOW_CLS_REPLACE:
1185 return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
1187 case FLOW_CLS_DESTROY:
1188 return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
1190 case FLOW_CLS_STATS:
1191 return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
1199 int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
1200 struct tc_cls_matchall_offload *ma)
1202 switch (ma->command) {
1203 case TC_CLSMATCHALL_REPLACE:
1204 return mlx5e_tc_configure_matchall(priv, ma);
1205 case TC_CLSMATCHALL_DESTROY:
1206 return mlx5e_tc_delete_matchall(priv, ma);
1207 case TC_CLSMATCHALL_STATS:
1208 mlx5e_tc_stats_matchall(priv, ma);
1215 static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
1218 unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
1219 struct mlx5e_priv *priv = cb_priv;
1222 case TC_SETUP_CLSFLOWER:
1223 return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
1224 case TC_SETUP_CLSMATCHALL:
1225 return mlx5e_rep_setup_tc_cls_matchall(priv, type_data);
1231 static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
1234 struct flow_cls_offload tmp, *f = type_data;
1235 struct mlx5e_priv *priv = cb_priv;
1236 struct mlx5_eswitch *esw;
1237 unsigned long flags;
1240 flags = MLX5_TC_FLAG(INGRESS) |
1241 MLX5_TC_FLAG(ESW_OFFLOAD) |
1242 MLX5_TC_FLAG(FT_OFFLOAD);
1243 esw = priv->mdev->priv.eswitch;
1246 case TC_SETUP_CLSFLOWER:
1247 memcpy(&tmp, f, sizeof(*f));
1249 if (!mlx5_esw_chains_prios_supported(esw) ||
1250 tmp.common.chain_index)
1253 /* Re-use tc offload path by moving the ft flow to the
1254 * reserved ft chain.
1256 * FT offload can use prio range [0, INT_MAX], so we normalize
1257 * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
1258 * as with tc, where prio 0 isn't supported.
1260 * We only support chain 0 of FT offload.
1262 if (tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw))
1264 if (tmp.common.chain_index != 0)
1267 tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
1269 err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
1270 memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
1277 static LIST_HEAD(mlx5e_rep_block_tc_cb_list);
1278 static LIST_HEAD(mlx5e_rep_block_ft_cb_list);
1279 static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
1282 struct mlx5e_priv *priv = netdev_priv(dev);
1283 struct flow_block_offload *f = type_data;
1285 f->unlocked_driver_cb = true;
1288 case TC_SETUP_BLOCK:
1289 return flow_block_cb_setup_simple(type_data,
1290 &mlx5e_rep_block_tc_cb_list,
1291 mlx5e_rep_setup_tc_cb,
1294 return flow_block_cb_setup_simple(type_data,
1295 &mlx5e_rep_block_ft_cb_list,
1296 mlx5e_rep_setup_ft_cb,
1303 bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
1305 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1306 struct mlx5_eswitch_rep *rep;
1308 if (!MLX5_ESWITCH_MANAGER(priv->mdev))
1311 if (!rpriv) /* non vport rep mlx5e instances don't use this field */
1315 return (rep->vport == MLX5_VPORT_UPLINK);
1318 static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
1321 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1329 mlx5e_get_sw_stats64(const struct net_device *dev,
1330 struct rtnl_link_stats64 *stats)
1332 struct mlx5e_priv *priv = netdev_priv(dev);
1334 mlx5e_fold_sw_stats64(priv, stats);
1338 static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
1342 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1343 return mlx5e_get_sw_stats64(dev, sp);
1350 mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
1352 struct mlx5e_priv *priv = netdev_priv(dev);
1354 /* update HW stats in background for next time */
1355 mlx5e_queue_update_stats(priv);
1356 memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
1359 static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
1361 return mlx5e_change_mtu(netdev, new_mtu, NULL);
1364 static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu)
1366 return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
1369 static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
1371 struct sockaddr *saddr = addr;
1373 if (!is_valid_ether_addr(saddr->sa_data))
1374 return -EADDRNOTAVAIL;
1376 ether_addr_copy(netdev->dev_addr, saddr->sa_data);
1380 static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
1383 netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n");
1388 /* allow setting 0-vid for compatibility with libvirt */
1392 static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *dev)
1394 struct mlx5e_priv *priv = netdev_priv(dev);
1395 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1397 return &rpriv->dl_port;
1400 static const struct net_device_ops mlx5e_netdev_ops_rep = {
1401 .ndo_open = mlx5e_rep_open,
1402 .ndo_stop = mlx5e_rep_close,
1403 .ndo_start_xmit = mlx5e_xmit,
1404 .ndo_setup_tc = mlx5e_rep_setup_tc,
1405 .ndo_get_devlink_port = mlx5e_rep_get_devlink_port,
1406 .ndo_get_stats64 = mlx5e_rep_get_stats,
1407 .ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
1408 .ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
1409 .ndo_change_mtu = mlx5e_rep_change_mtu,
1412 static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
1413 .ndo_open = mlx5e_open,
1414 .ndo_stop = mlx5e_close,
1415 .ndo_start_xmit = mlx5e_xmit,
1416 .ndo_set_mac_address = mlx5e_uplink_rep_set_mac,
1417 .ndo_setup_tc = mlx5e_rep_setup_tc,
1418 .ndo_get_devlink_port = mlx5e_rep_get_devlink_port,
1419 .ndo_get_stats64 = mlx5e_get_stats,
1420 .ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
1421 .ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
1422 .ndo_change_mtu = mlx5e_uplink_rep_change_mtu,
1423 .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
1424 .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
1425 .ndo_features_check = mlx5e_features_check,
1426 .ndo_set_vf_mac = mlx5e_set_vf_mac,
1427 .ndo_set_vf_rate = mlx5e_set_vf_rate,
1428 .ndo_get_vf_config = mlx5e_get_vf_config,
1429 .ndo_get_vf_stats = mlx5e_get_vf_stats,
1430 .ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan,
1431 .ndo_set_features = mlx5e_set_features,
1434 bool mlx5e_eswitch_uplink_rep(struct net_device *netdev)
1436 return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep;
1439 bool mlx5e_eswitch_rep(struct net_device *netdev)
1441 if (netdev->netdev_ops == &mlx5e_netdev_ops_rep ||
1442 netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep)
1448 static void mlx5e_build_rep_params(struct net_device *netdev)
1450 struct mlx5e_priv *priv = netdev_priv(netdev);
1451 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1452 struct mlx5_eswitch_rep *rep = rpriv->rep;
1453 struct mlx5_core_dev *mdev = priv->mdev;
1454 struct mlx5e_params *params;
1456 u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
1457 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
1458 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
1460 params = &priv->channels.params;
1461 params->hard_mtu = MLX5E_ETH_HARD_MTU;
1462 params->sw_mtu = netdev->mtu;
1465 if (rep->vport == MLX5_VPORT_UPLINK)
1466 params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
1468 params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;
1471 mlx5e_build_rq_params(mdev, params);
1473 /* CQ moderation params */
1474 params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
1475 mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
1478 params->tunneled_offload_en = false;
1480 mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode);
1483 mlx5e_build_rss_params(&priv->rss_params, params->num_channels);
1486 static void mlx5e_build_rep_netdev(struct net_device *netdev)
1488 struct mlx5e_priv *priv = netdev_priv(netdev);
1489 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1490 struct mlx5_eswitch_rep *rep = rpriv->rep;
1491 struct mlx5_core_dev *mdev = priv->mdev;
1493 if (rep->vport == MLX5_VPORT_UPLINK) {
1494 SET_NETDEV_DEV(netdev, mdev->device);
1495 netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
1496 /* we want a persistent mac for the uplink rep */
1497 mlx5_query_mac_address(mdev, netdev->dev_addr);
1498 netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
1499 #ifdef CONFIG_MLX5_CORE_EN_DCB
1500 if (MLX5_CAP_GEN(mdev, qos))
1501 netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
1504 netdev->netdev_ops = &mlx5e_netdev_ops_rep;
1505 eth_hw_addr_random(netdev);
1506 netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
1509 netdev->watchdog_timeo = 15 * HZ;
1511 netdev->features |= NETIF_F_NETNS_LOCAL;
1513 netdev->hw_features |= NETIF_F_HW_TC;
1514 netdev->hw_features |= NETIF_F_SG;
1515 netdev->hw_features |= NETIF_F_IP_CSUM;
1516 netdev->hw_features |= NETIF_F_IPV6_CSUM;
1517 netdev->hw_features |= NETIF_F_GRO;
1518 netdev->hw_features |= NETIF_F_TSO;
1519 netdev->hw_features |= NETIF_F_TSO6;
1520 netdev->hw_features |= NETIF_F_RXCSUM;
1522 if (rep->vport == MLX5_VPORT_UPLINK)
1523 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
1525 netdev->features |= NETIF_F_VLAN_CHALLENGED;
1527 netdev->features |= netdev->hw_features;
1530 static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
1531 struct net_device *netdev,
1532 const struct mlx5e_profile *profile,
1535 struct mlx5e_priv *priv = netdev_priv(netdev);
1538 err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
1542 priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
1544 mlx5e_build_rep_params(netdev);
1545 mlx5e_build_rep_netdev(netdev);
1547 mlx5e_timestamp_init(priv);
1552 static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
1554 mlx5e_netdev_cleanup(priv->netdev, priv);
1557 static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
1559 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1560 struct mlx5_eswitch_rep *rep = rpriv->rep;
1561 struct ttc_params ttc_params = {};
1564 priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
1565 MLX5_FLOW_NAMESPACE_KERNEL);
1567 /* The inner_ttc in the ttc params is intentionally not set */
1568 ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
1569 mlx5e_set_ttc_ft_params(&ttc_params);
1571 if (rep->vport != MLX5_VPORT_UPLINK)
1572 /* To give the uplink rep TTC a lower level for chaining from the root ft */
1573 ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;
1575 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
1576 ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
1578 err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
1580 netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err);
1586 static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
1588 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1589 struct mlx5_eswitch_rep *rep = rpriv->rep;
1590 struct mlx5_flow_table_attr ft_attr = {};
1591 struct mlx5_flow_namespace *ns;
1594 if (rep->vport != MLX5_VPORT_UPLINK) {
1595 /* non-uplink reps will skip any bypass tables and go directly to
1598 rpriv->root_ft = priv->fs.ttc.ft.t;
1602 /* the uplink root ft is used to auto-chain to the ethtool or ttc tables */
1603 ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_OFFLOADS);
1605 netdev_err(priv->netdev, "Failed to get reps offloads namespace\n");
1609 ft_attr.max_fte = 0; /* Empty table, miss rule will always point to next table */
1612 rpriv->root_ft = mlx5_create_flow_table(ns, &ft_attr);
1613 if (IS_ERR(rpriv->root_ft)) {
1614 err = PTR_ERR(rpriv->root_ft);
1615 rpriv->root_ft = NULL;
1621 static void mlx5e_destroy_rep_root_ft(struct mlx5e_priv *priv)
1623 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1624 struct mlx5_eswitch_rep *rep = rpriv->rep;
1626 if (rep->vport != MLX5_VPORT_UPLINK)
1628 mlx5_destroy_flow_table(rpriv->root_ft);
1631 static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
1633 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1634 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1635 struct mlx5_eswitch_rep *rep = rpriv->rep;
1636 struct mlx5_flow_handle *flow_rule;
1637 struct mlx5_flow_destination dest;
1639 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1640 dest.ft = rpriv->root_ft;
1642 flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, rep->vport, &dest);
1643 if (IS_ERR(flow_rule))
1644 return PTR_ERR(flow_rule);
1645 rpriv->vport_rx_rule = flow_rule;
1649 static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
1651 struct mlx5_core_dev *mdev = priv->mdev;
1654 mlx5e_init_l2_addr(priv);
1656 err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
1658 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
1662 err = mlx5e_create_indirect_rqt(priv);
1664 goto err_close_drop_rq;
1666 err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
1668 goto err_destroy_indirect_rqts;
1670 err = mlx5e_create_indirect_tirs(priv, false);
1672 goto err_destroy_direct_rqts;
1674 err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
1676 goto err_destroy_indirect_tirs;
1678 err = mlx5e_create_rep_ttc_table(priv);
1680 goto err_destroy_direct_tirs;
1682 err = mlx5e_create_rep_root_ft(priv);
1684 goto err_destroy_ttc_table;
1686 err = mlx5e_create_rep_vport_rx_rule(priv);
1688 goto err_destroy_root_ft;
1690 mlx5e_ethtool_init_steering(priv);
1694 err_destroy_root_ft:
1695 mlx5e_destroy_rep_root_ft(priv);
1696 err_destroy_ttc_table:
1697 mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
1698 err_destroy_direct_tirs:
1699 mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
1700 err_destroy_indirect_tirs:
1701 mlx5e_destroy_indirect_tirs(priv, false);
1702 err_destroy_direct_rqts:
1703 mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
1704 err_destroy_indirect_rqts:
1705 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
1707 mlx5e_close_drop_rq(&priv->drop_rq);
1711 static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
1713 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1715 mlx5_del_flow_rules(rpriv->vport_rx_rule);
1716 mlx5e_destroy_rep_root_ft(priv);
1717 mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
1718 mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
1719 mlx5e_destroy_indirect_tirs(priv, false);
1720 mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
1721 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
1722 mlx5e_close_drop_rq(&priv->drop_rq);
1725 static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
1727 int err = mlx5e_init_rep_rx(priv);
1732 mlx5e_create_q_counters(priv);
1736 static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
1738 mlx5e_destroy_q_counters(priv);
1739 mlx5e_cleanup_rep_rx(priv);
1742 static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
1744 struct mlx5_rep_uplink_priv *uplink_priv;
1745 struct net_device *netdev;
1746 struct mlx5e_priv *priv;
1749 netdev = rpriv->netdev;
1750 priv = netdev_priv(netdev);
1751 uplink_priv = &rpriv->uplink_priv;
1753 mutex_init(&uplink_priv->unready_flows_lock);
1754 INIT_LIST_HEAD(&uplink_priv->unready_flows);
1756 /* init shared tc flow table */
1757 err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
1761 mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);
1763 /* init indirect block notifications */
1764 INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
1765 uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
1766 err = register_netdevice_notifier_dev_net(rpriv->netdev,
1767 &uplink_priv->netdevice_nb,
1768 &uplink_priv->netdevice_nn);
1770 mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
1771 goto tc_esw_cleanup;
1777 mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
1781 static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
1783 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1786 err = mlx5e_create_tises(priv);
1788 mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
1792 if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
1793 err = mlx5e_init_uplink_rep_tx(rpriv);
1801 mlx5e_destroy_tises(priv);
1805 static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
1807 struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
1809 /* clean indirect TC block notifications */
1810 unregister_netdevice_notifier_dev_net(rpriv->netdev,
1811 &uplink_priv->netdevice_nb,
1812 &uplink_priv->netdevice_nn);
1813 mlx5e_rep_indr_clean_block_privs(rpriv);
1815 /* delete shared tc flow table */
1816 mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
1817 mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
1820 static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
1822 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1824 mlx5e_destroy_tises(priv);
1826 if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
1827 mlx5e_cleanup_uplink_rep_tx(rpriv);
1830 static void mlx5e_rep_enable(struct mlx5e_priv *priv)
1832 mlx5e_set_netdev_mtu_boundaries(priv);
1835 static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
1840 static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
1842 struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
1844 if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
1845 struct mlx5_eqe *eqe = data;
1847 switch (eqe->sub_type) {
1848 case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
1849 case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
1850 queue_work(priv->wq, &priv->update_carrier_work);
1859 if (event == MLX5_DEV_EVENT_PORT_AFFINITY) {
1860 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1862 queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work);
1870 static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
1872 struct net_device *netdev = priv->netdev;
1873 struct mlx5_core_dev *mdev = priv->mdev;
1874 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1877 netdev->min_mtu = ETH_MIN_MTU;
1878 mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
1879 netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
1880 mlx5e_set_dev_port_mtu(priv);
1882 INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
1883 mlx5e_tc_reoffload_flows_work);
1885 mlx5_lag_add(mdev, netdev);
1886 priv->events_nb.notifier_call = uplink_rep_async_event;
1887 mlx5_notifier_register(mdev, &priv->events_nb);
1888 #ifdef CONFIG_MLX5_CORE_EN_DCB
1889 mlx5e_dcbnl_initialize(priv);
1890 mlx5e_dcbnl_init_app(priv);
1894 static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
1896 struct mlx5_core_dev *mdev = priv->mdev;
1897 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1899 #ifdef CONFIG_MLX5_CORE_EN_DCB
1900 mlx5e_dcbnl_delete_app(priv);
1902 mlx5_notifier_unregister(mdev, &priv->events_nb);
1903 cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
1904 mlx5_lag_remove(mdev);
1907 static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
1908 static MLX5E_DEFINE_STATS_GRP(vport_rep, MLX5E_NDO_UPDATE_STATS);
1910 /* The stats groups' order is the reverse of the update_stats() call order */
1911 static mlx5e_stats_grp_t mlx5e_rep_stats_grps[] = {
1912 &MLX5E_STATS_GRP(sw_rep),
1913 &MLX5E_STATS_GRP(vport_rep),
1916 static unsigned int mlx5e_rep_stats_grps_num(struct mlx5e_priv *priv)
1918 return ARRAY_SIZE(mlx5e_rep_stats_grps);
1921 /* The stats groups' order is the reverse of the update_stats() call order */
1922 static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
1923 &MLX5E_STATS_GRP(sw),
1924 &MLX5E_STATS_GRP(qcnt),
1925 &MLX5E_STATS_GRP(vnic_env),
1926 &MLX5E_STATS_GRP(vport),
1927 &MLX5E_STATS_GRP(802_3),
1928 &MLX5E_STATS_GRP(2863),
1929 &MLX5E_STATS_GRP(2819),
1930 &MLX5E_STATS_GRP(phy),
1931 &MLX5E_STATS_GRP(eth_ext),
1932 &MLX5E_STATS_GRP(pcie),
1933 &MLX5E_STATS_GRP(per_prio),
1934 &MLX5E_STATS_GRP(pme),
1935 &MLX5E_STATS_GRP(channels),
1936 &MLX5E_STATS_GRP(per_port_buff_congest),
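/* Contrast with mlx5e_rep_stats_grps above: plain vport representors expose
 * only a minimal SW/vport counter set, while the uplink representor exposes
 * the full set of physical port statistics groups.
 */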
1939 static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
1941 return ARRAY_SIZE(mlx5e_ul_rep_stats_grps);
1944 static const struct mlx5e_profile mlx5e_rep_profile = {
1945 .init = mlx5e_init_rep,
1946 .cleanup = mlx5e_cleanup_rep,
1947 .init_rx = mlx5e_init_rep_rx,
1948 .cleanup_rx = mlx5e_cleanup_rep_rx,
1949 .init_tx = mlx5e_init_rep_tx,
1950 .cleanup_tx = mlx5e_cleanup_rep_tx,
1951 .enable = mlx5e_rep_enable,
1952 .update_rx = mlx5e_update_rep_rx,
1953 .update_stats = mlx5e_update_ndo_stats,
1954 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
1955 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
1957 .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
1958 .stats_grps = mlx5e_rep_stats_grps,
1959 .stats_grps_num = mlx5e_rep_stats_grps_num,
1962 static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
1963 .init = mlx5e_init_rep,
1964 .cleanup = mlx5e_cleanup_rep,
1965 .init_rx = mlx5e_init_ul_rep_rx,
1966 .cleanup_rx = mlx5e_cleanup_ul_rep_rx,
1967 .init_tx = mlx5e_init_rep_tx,
1968 .cleanup_tx = mlx5e_cleanup_rep_tx,
1969 .enable = mlx5e_uplink_rep_enable,
1970 .disable = mlx5e_uplink_rep_disable,
1971 .update_rx = mlx5e_update_rep_rx,
1972 .update_stats = mlx5e_update_ndo_stats,
1973 .update_carrier = mlx5e_update_carrier,
1974 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
1975 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
1976 .max_tc = MLX5E_MAX_NUM_TC,
1977 .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
1978 .stats_grps = mlx5e_ul_rep_stats_grps,
1979 .stats_grps_num = mlx5e_ul_rep_stats_grps_num,
1983 is_devlink_port_supported(const struct mlx5_core_dev *dev,
1984 const struct mlx5e_rep_priv *rpriv)
1986 return rpriv->rep->vport == MLX5_VPORT_UPLINK ||
1987 rpriv->rep->vport == MLX5_VPORT_PF ||
1988 mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport);
1992 vport_to_devlink_port_index(const struct mlx5_core_dev *dev, u16 vport_num)
1994 return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
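/* Illustrative example of the encoding above: vhca_id 0x2 and vport_num 3
 * give port index (0x2 << 16) | 3 = 0x20003, presumably to keep devlink
 * port indices unique across functions of the same device.
 */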
1997 static int register_devlink_port(struct mlx5_core_dev *dev,
1998 struct mlx5e_rep_priv *rpriv)
2000 struct devlink *devlink = priv_to_devlink(dev);
2001 struct mlx5_eswitch_rep *rep = rpriv->rep;
2002 struct netdev_phys_item_id ppid = {};
2003 unsigned int dl_port_index = 0;
2005 if (!is_devlink_port_supported(dev, rpriv))
2008 mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
2010 if (rep->vport == MLX5_VPORT_UPLINK) {
2011 devlink_port_attrs_set(&rpriv->dl_port,
2012 DEVLINK_PORT_FLAVOUR_PHYSICAL,
2013 PCI_FUNC(dev->pdev->devfn), false, 0,
2014 &ppid.id[0], ppid.id_len);
2015 dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
2016 } else if (rep->vport == MLX5_VPORT_PF) {
2017 devlink_port_attrs_pci_pf_set(&rpriv->dl_port,
2018 &ppid.id[0], ppid.id_len,
2020 dl_port_index = rep->vport;
2021 } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch,
2022 rpriv->rep->vport)) {
2023 devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
2024 &ppid.id[0], ppid.id_len,
2027 dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
2030 return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index);
2033 static void unregister_devlink_port(struct mlx5_core_dev *dev,
2034 struct mlx5e_rep_priv *rpriv)
2036 if (is_devlink_port_supported(dev, rpriv))
2037 devlink_port_unregister(&rpriv->dl_port);
2040 /* e-Switch vport representors */
2042 mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
2044 const struct mlx5e_profile *profile;
2045 struct mlx5e_rep_priv *rpriv;
2046 struct net_device *netdev;
2049 rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
2053 /* rpriv->rep to be looked up when profile->init() is called */
2056 nch = mlx5e_get_max_num_channels(dev);
2057 profile = (rep->vport == MLX5_VPORT_UPLINK) ?
2058 &mlx5e_uplink_rep_profile : &mlx5e_rep_profile;
2059 netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
2062 "Failed to create representor netdev for vport %d\n",
2068 dev_net_set(netdev, mlx5_core_net(dev));
2069 rpriv->netdev = netdev;
2070 rep->rep_data[REP_ETH].priv = rpriv;
2071 INIT_LIST_HEAD(&rpriv->vport_sqs_list);
2073 if (rep->vport == MLX5_VPORT_UPLINK) {
2074 err = mlx5e_create_mdev_resources(dev);
2076 goto err_destroy_netdev;
2079 err = mlx5e_attach_netdev(netdev_priv(netdev));
2082 "Failed to attach representor netdev for vport %d\n",
2084 goto err_destroy_mdev_resources;
2087 err = mlx5e_rep_neigh_init(rpriv);
2090 "Failed to initialized neighbours handling for vport %d\n",
2092 goto err_detach_netdev;
2095 err = register_devlink_port(dev, rpriv);
2097 netdev_warn(netdev, "Failed to register devlink port %d\n",
2099 goto err_neigh_cleanup;
2102 err = register_netdev(netdev);
2105 "Failed to register representor netdev for vport %d\n",
2107 goto err_devlink_cleanup;
2110 if (is_devlink_port_supported(dev, rpriv))
2111 devlink_port_type_eth_set(&rpriv->dl_port, netdev);
2114 err_devlink_cleanup:
2115 unregister_devlink_port(dev, rpriv);
2118 mlx5e_rep_neigh_cleanup(rpriv);
2121 mlx5e_detach_netdev(netdev_priv(netdev));
2123 err_destroy_mdev_resources:
2124 if (rep->vport == MLX5_VPORT_UPLINK)
2125 mlx5e_destroy_mdev_resources(dev);
2128 mlx5e_destroy_netdev(netdev_priv(netdev));
2134 mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
2136 struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
2137 struct net_device *netdev = rpriv->netdev;
2138 struct mlx5e_priv *priv = netdev_priv(netdev);
2139 struct mlx5_core_dev *dev = priv->mdev;
2140 void *ppriv = priv->ppriv;
2142 if (is_devlink_port_supported(dev, rpriv))
2143 devlink_port_type_clear(&rpriv->dl_port);
2144 unregister_netdev(netdev);
2145 unregister_devlink_port(dev, rpriv);
2146 mlx5e_rep_neigh_cleanup(rpriv);
2147 mlx5e_detach_netdev(priv);
2148 if (rep->vport == MLX5_VPORT_UPLINK)
2149 mlx5e_destroy_mdev_resources(priv->mdev);
2150 mlx5e_destroy_netdev(priv);
2151 kfree(ppriv); /* mlx5e_rep_priv */
2154 static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
2156 struct mlx5e_rep_priv *rpriv;
2158 rpriv = mlx5e_rep_to_rep_priv(rep);
2160 return rpriv->netdev;
2163 static const struct mlx5_eswitch_rep_ops rep_ops = {
2164 .load = mlx5e_vport_rep_load,
2165 .unload = mlx5e_vport_rep_unload,
2166 .get_proto_dev = mlx5e_vport_rep_get_proto_dev
2169 void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
2171 struct mlx5_eswitch *esw = mdev->priv.eswitch;
2173 mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
2176 void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
2178 struct mlx5_eswitch *esw = mdev->priv.eswitch;
2180 mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);