/*
 * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_verbs.h>
#include <linux/mlx5/fs.h>
#include "en.h"
#include "en/params.h"
#include "ipoib.h"
#include "en/fs_ethtool.h"

#define IB_DEFAULT_Q_KEY   0xb1b
#define MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE 9

static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);
static const struct net_device_ops mlx5i_netdev_ops = {
	.ndo_open                = mlx5i_open,
	.ndo_stop                = mlx5i_close,
	.ndo_get_stats64         = mlx5i_get_stats,
	.ndo_init                = mlx5i_dev_init,
	.ndo_uninit              = mlx5i_dev_cleanup,
	.ndo_change_mtu          = mlx5i_change_mtu,
	.ndo_eth_ioctl           = mlx5i_ioctl,
};
/* IPoIB mlx5 netdev profile */
static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params)
{
	/* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, false);
	mlx5e_set_rq_type(mdev, params);
	mlx5e_init_rq_type_params(mdev, params);

	/* RQ size in ipoib by default is 512 */
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;

	params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
	params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;

	/* CQE compression is not supported for IPoIB */
	params->rx_cqe_compress_def = false;
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
}
/* Called directly after IPoIB netdevice was created to initialize SW structs */
int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev)
{
	struct mlx5e_priv *priv = mlx5i_epriv(netdev);

	netif_carrier_off(netdev);
	mlx5e_set_netdev_mtu_boundaries(priv);
	netdev->mtu = netdev->max_mtu;

	mlx5e_build_nic_params(priv, NULL, netdev->mtu);
	mlx5i_build_nic_params(mdev, &priv->channels.params);

	mlx5e_timestamp_init(priv);

	/* netdev init */
	netdev->hw_features |= NETIF_F_SG;
	netdev->hw_features |= NETIF_F_IP_CSUM;
	netdev->hw_features |= NETIF_F_IPV6_CSUM;
	netdev->hw_features |= NETIF_F_GRO;
	netdev->hw_features |= NETIF_F_TSO;
	netdev->hw_features |= NETIF_F_TSO6;
	netdev->hw_features |= NETIF_F_RXCSUM;
	netdev->hw_features |= NETIF_F_RXHASH;

	netdev->netdev_ops = &mlx5i_netdev_ops;
	netdev->ethtool_ops = &mlx5i_ethtool_ops;

	return 0;
}

/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
void mlx5i_cleanup(struct mlx5e_priv *priv)
{
	mlx5e_priv_cleanup(priv);
}
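
/* Fold the per-channel RQ/SQ software counters into priv->stats.sw */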
static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
{
	struct rtnl_link_stats64 s = {};
	int i, j;

	for (i = 0; i < priv->stats_nch; i++) {
		struct mlx5e_channel_stats *channel_stats;
		struct mlx5e_rq_stats *rq_stats;

		channel_stats = priv->channel_stats[i];
		rq_stats = &channel_stats->rq;

		s.rx_packets += rq_stats->packets;
		s.rx_bytes   += rq_stats->bytes;

		for (j = 0; j < priv->max_opened_tc; j++) {
			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];

			s.tx_packets += sq_stats->packets;
			s.tx_bytes   += sq_stats->bytes;
			s.tx_dropped += sq_stats->dropped;
		}
	}

	memset(&priv->stats.sw, 0, sizeof(s));

	priv->stats.sw.rx_packets = s.rx_packets;
	priv->stats.sw.rx_bytes = s.rx_bytes;
	priv->stats.sw.tx_packets = s.tx_packets;
	priv->stats.sw.tx_bytes = s.tx_bytes;
	priv->stats.sw.tx_queue_dropped = s.tx_dropped;
}
void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = mlx5i_epriv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;

	mlx5i_grp_sw_update_stats(priv);

	stats->rx_packets = sstats->rx_packets;
	stats->rx_bytes   = sstats->rx_bytes;
	stats->tx_packets = sstats->tx_packets;
	stats->tx_bytes   = sstats->tx_bytes;
	stats->tx_dropped = sstats->tx_queue_dropped;
}
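
/* Resolve the parent IPoIB netdevice of a child (pkey) interface via
 * ndo_get_iflink, take a reference on it and account the sub-interface
 * on the parent.
 */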
struct net_device *mlx5i_parent_get(struct net_device *netdev)
{
	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
	struct mlx5i_priv *ipriv, *parent_ipriv;
	struct net_device *parent_dev;
	int parent_ifindex;

	ipriv = priv->ppriv;

	parent_ifindex = netdev->netdev_ops->ndo_get_iflink(netdev);
	parent_dev = dev_get_by_index(dev_net(netdev), parent_ifindex);
	if (!parent_dev)
		return NULL;

	parent_ipriv = netdev_priv(parent_dev);

	parent_ipriv->num_sub_interfaces++;

	ipriv->parent_dev = parent_dev;
	return parent_dev;
}
void mlx5i_parent_put(struct net_device *netdev)
{
	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
	struct mlx5i_priv *ipriv, *parent_ipriv;

	ipriv = priv->ppriv;
	parent_ipriv = netdev_priv(ipriv->parent_dev);

	parent_ipriv->num_sub_interfaces--;

	dev_put(ipriv->parent_dev);
}
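
/* Walk the underlay UD QP through the RST -> INIT -> RTR -> RTS state
 * transitions; on any failure the QP is moved to the error state.
 */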
int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5i_priv *ipriv = priv->ppriv;
	int ret;

	{
		u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
		void *qpc;

		qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);

		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
		MLX5_SET(qpc, qpc, primary_address_path.pkey_index,
			 ipriv->pkey_index);
		MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
		MLX5_SET(qpc, qpc, q_key, IB_DEFAULT_Q_KEY);

		MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
		MLX5_SET(rst2init_qp_in, in, qpn, ipriv->qpn);
		ret = mlx5_cmd_exec_in(mdev, rst2init_qp, in);
		if (ret)
			goto err_qp_modify_to_err;
	}
	{
		u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};

		MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
		MLX5_SET(init2rtr_qp_in, in, qpn, ipriv->qpn);
		ret = mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
		if (ret)
			goto err_qp_modify_to_err;
	}
	{
		u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};

		MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
		MLX5_SET(rtr2rts_qp_in, in, qpn, ipriv->qpn);
		ret = mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
		if (ret)
			goto err_qp_modify_to_err;
	}
	return 0;

err_qp_modify_to_err:
	{
		u32 in[MLX5_ST_SZ_DW(qp_2err_in)] = {};

		MLX5_SET(qp_2err_in, in, opcode, MLX5_CMD_OP_2ERR_QP);
		MLX5_SET(qp_2err_in, in, qpn, ipriv->qpn);
		mlx5_cmd_exec_in(mdev, qp_2err, in);
	}
	return ret;
}
void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv)
{
	struct mlx5i_priv *ipriv = priv->ppriv;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(qp_2rst_in)] = {};

	MLX5_SET(qp_2rst_in, in, opcode, MLX5_CMD_OP_2RST_QP);
	MLX5_SET(qp_2rst_in, in, qpn, ipriv->qpn);
	mlx5_cmd_exec_in(mdev, qp_2rst, in);
}
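
/* Create the underlay UD QP serving this netdevice. When the device
 * reports the mkey_by_name capability, the QPN encoded in the device
 * address bytes is requested explicitly.
 */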
#define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2

int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
{
	const unsigned char *dev_addr = priv->netdev->dev_addr;
	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {};
	struct mlx5i_priv *ipriv = priv->ppriv;
	void *addr_path;
	int qpn = 0;
	int ret = 0;
	void *qpc;

	if (MLX5_CAP_GEN(priv->mdev, mkey_by_name)) {
		qpn = (dev_addr[1] << 16) + (dev_addr[2] << 8) + dev_addr[3];
		MLX5_SET(create_qp_in, in, input_qpn, qpn);
	}

	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(priv->mdev));
	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
	MLX5_SET(qpc, qpc, ulp_stateless_offload_mode,
		 MLX5_QP_ENHANCED_ULP_STATELESS_MODE);

	addr_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
	MLX5_SET(ads, addr_path, vhca_port_num, 1);
	MLX5_SET(ads, addr_path, grh, 1);

	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
	ret = mlx5_cmd_exec_inout(priv->mdev, create_qp, in, out);
	if (ret)
		return ret;

	ipriv->qpn = MLX5_GET(create_qp_out, out, qpn);

	return 0;
}
void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};

	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, in, qpn, qpn);
	mlx5_cmd_exec_in(mdev, destroy_qp, in);
}

int mlx5i_update_nic_rx(struct mlx5e_priv *priv)
{
	return mlx5e_refresh_tirs(priv, true, true);
}
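
/* Create a TIS whose context carries the underlay QP number */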
int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
	void *tisc;

	tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);

	return mlx5e_create_tis(mdev, in, tisn);
}
static int mlx5i_init_tx(struct mlx5e_priv *priv)
{
	struct mlx5i_priv *ipriv = priv->ppriv;
	int err;

	err = mlx5i_create_underlay_qp(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err);
		return err;
	}

	err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &ipriv->tisn);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
		goto err_destroy_underlay_qp;
	}

	return 0;

err_destroy_underlay_qp:
	mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
	return err;
}

static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
{
	struct mlx5i_priv *ipriv = priv->ppriv;

	mlx5e_destroy_tis(priv->mdev, ipriv->tisn);
	mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
}
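
/* RX steering: aRFS tables are optional (NTUPLE is cleared from
 * hw_features on failure), the TTC table is mandatory.
 */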
static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
	struct mlx5_flow_namespace *ns =
		mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
	int err;

	if (!ns)
		return -EINVAL;

	mlx5e_fs_set_ns(priv->fs, ns, false);
	err = mlx5e_arfs_create_tables(priv->fs, priv->rx_res,
				       !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
	if (err) {
		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
			   err);
		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
	}

	err = mlx5e_create_ttc_table(priv->fs, priv->rx_res);
	if (err) {
		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
			   err);
		goto err_destroy_arfs_tables;
	}

	mlx5e_ethtool_init_steering(priv->fs);

	return 0;

err_destroy_arfs_tables:
	mlx5e_arfs_destroy_tables(priv->fs,
				  !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
	return err;
}

static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_destroy_ttc_table(priv->fs);
	mlx5e_arfs_destroy_tables(priv->fs,
				  !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
	mlx5e_ethtool_cleanup_steering(priv->fs);
}
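
/* RX init: flow steering context, queue counters, drop RQ, RX resources
 * (RQTs/TIRs) and steering tables; torn down in reverse order on error.
 */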
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	priv->fs = mlx5e_fs_init(priv->profile, mdev,
				 !test_bit(MLX5E_STATE_DESTROYING, &priv->state),
				 priv->dfs_root);
	if (!priv->fs) {
		netdev_err(priv->netdev, "FS allocation failed\n");
		return -ENOMEM;
	}

	mlx5e_create_q_counters(priv);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_destroy_q_counters;
	}

	priv->rx_res = mlx5e_rx_res_create(priv->mdev, 0, priv->max_nch, priv->drop_rq.rqn,
					   &priv->channels.params.packet_merge,
					   priv->channels.params.num_channels);
	if (IS_ERR(priv->rx_res)) {
		err = PTR_ERR(priv->rx_res);
		goto err_close_drop_rq;
	}

	err = mlx5i_create_flow_steering(priv);
	if (err)
		goto err_destroy_rx_res;

	return 0;

err_destroy_rx_res:
	mlx5e_rx_res_destroy(priv->rx_res);
	priv->rx_res = ERR_PTR(-EINVAL);
err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
	mlx5e_destroy_q_counters(priv);
	mlx5e_fs_cleanup(priv->fs);
	return err;
}

static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
	mlx5i_destroy_flow_steering(priv);
	mlx5e_rx_res_destroy(priv->rx_res);
	priv->rx_res = ERR_PTR(-EINVAL);
	mlx5e_close_drop_rq(&priv->drop_rq);
	mlx5e_destroy_q_counters(priv);
	mlx5e_fs_cleanup(priv->fs);
}
/* The stats groups order is opposite to the update_stats() order calls */
static mlx5e_stats_grp_t mlx5i_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
};

static unsigned int mlx5i_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5i_stats_grps);
}
u32 mlx5i_get_tisn(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv, u8 lag_port, u8 tc)
{
	struct mlx5i_priv *ipriv = priv->ppriv;

	if (WARN(lag_port || tc,
		 "IPoIB unexpected non-zero value: lag_port (%u), tc (%u)\n",
		 lag_port, tc))
		return 0;

	return ipriv->tisn;
}

static const struct mlx5e_profile mlx5i_nic_profile = {
	.init		   = mlx5i_init,
	.cleanup	   = mlx5i_cleanup,
	.init_tx	   = mlx5i_init_tx,
	.cleanup_tx	   = mlx5i_cleanup_tx,
	.init_rx	   = mlx5i_init_rx,
	.cleanup_rx	   = mlx5i_cleanup_rx,
	.enable		   = NULL, /* mlx5i_enable */
	.disable	   = NULL, /* mlx5i_disable */
	.update_rx	   = mlx5i_update_nic_rx,
	.update_stats	   = NULL, /* mlx5i_update_stats */
	.update_carrier    = NULL, /* no HW update in IB link */
	.rx_handlers       = &mlx5i_rx_handlers,
	.max_tc		   = MLX5I_MAX_NUM_TC,
	.stats_grps        = mlx5i_stats_grps,
	.stats_grps_num    = mlx5i_stats_grps_num,
	.get_tisn          = mlx5i_get_tisn,
};
/* mlx5i netdev NDOs */

static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
	struct mlx5e_params new_params;
	int err = 0;

	mutex_lock(&priv->state_lock);

	new_params = priv->channels.params;
	new_params.sw_mtu = new_mtu;

	err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
	if (err)
		goto out;

	netdev->mtu = new_params.sw_mtu;

out:
	mutex_unlock(&priv->state_lock);
	return err;
}
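
/* ndo_init: derive the netdevice hardware address from the underlay QPN
 * and register the QPN -> netdev mapping used to resolve the receiving
 * netdevice.
 */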
int mlx5i_dev_init(struct net_device *dev)
{
	struct mlx5e_priv *priv = mlx5i_epriv(dev);
	struct mlx5i_priv *ipriv = priv->ppriv;
	u8 addr_mod[3];

	/* Set dev address using underlay QP */
	addr_mod[0] = (ipriv->qpn >> 16) & 0xff;
	addr_mod[1] = (ipriv->qpn >>  8) & 0xff;
	addr_mod[2] = (ipriv->qpn) & 0xff;
	dev_addr_mod(dev, 1, addr_mod, sizeof(addr_mod));

	/* Add QPN to net-device mapping to HT */
	mlx5i_pkey_add_qpn(dev, ipriv->qpn);

	return 0;
}
int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlx5e_priv *priv = mlx5i_epriv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx5e_hwstamp_set(priv, ifr);
	case SIOCGHWTSTAMP:
		return mlx5e_hwstamp_get(priv, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

void mlx5i_dev_cleanup(struct net_device *dev)
{
	struct mlx5e_priv *priv = mlx5i_epriv(dev);
	struct mlx5i_priv *ipriv = priv->ppriv;

	mlx5i_uninit_underlay_qp(priv);

	/* Delete QPN to net-device mapping from HT */
	mlx5i_pkey_del_qpn(dev, ipriv->qpn);
}
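
/* ndo_open: bring the underlay QP to RTS, attach it to the RX steering
 * tables, then open and activate the channels.
 */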
static int mlx5i_open(struct net_device *netdev)
{
	struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
	struct mlx5i_priv *ipriv = epriv->ppriv;
	struct mlx5_core_dev *mdev = epriv->mdev;
	int err;

	mutex_lock(&epriv->state_lock);

	set_bit(MLX5E_STATE_OPENED, &epriv->state);

	err = mlx5i_init_underlay_qp(epriv);
	if (err) {
		mlx5_core_warn(mdev, "prepare underlay qp state failed, %d\n", err);
		goto err_clear_state_opened_flag;
	}

	err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qpn);
	if (err) {
		mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err);
		goto err_reset_qp;
	}

	err = mlx5e_open_channels(epriv, &epriv->channels);
	if (err)
		goto err_remove_fs_underlay_qp;

	err = epriv->profile->update_rx(epriv);
	if (err)
		goto err_close_channels;

	mlx5e_activate_priv_channels(epriv);

	mutex_unlock(&epriv->state_lock);
	return 0;

err_close_channels:
	mlx5e_close_channels(&epriv->channels);
err_remove_fs_underlay_qp:
	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
err_reset_qp:
	mlx5i_uninit_underlay_qp(epriv);
err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &epriv->state);
	mutex_unlock(&epriv->state_lock);
	return err;
}
static int mlx5i_close(struct net_device *netdev)
{
	struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
	struct mlx5i_priv *ipriv = epriv->ppriv;
	struct mlx5_core_dev *mdev = epriv->mdev;

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g RX/TX queue size change) that involves close&open failed.
	 */
	mutex_lock(&epriv->state_lock);

	if (!test_bit(MLX5E_STATE_OPENED, &epriv->state))
		goto unlock;

	clear_bit(MLX5E_STATE_OPENED, &epriv->state);

	netif_carrier_off(epriv->netdev);
	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
	mlx5e_deactivate_priv_channels(epriv);
	mlx5e_close_channels(&epriv->channels);
	mlx5i_uninit_underlay_qp(epriv);
unlock:
	mutex_unlock(&epriv->state_lock);
	return 0;
}
/* IPoIB RDMA netdev callbacks */
static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca,
			      union ib_gid *gid, u16 lid, int set_qkey,
			      u32 qkey)
{
	struct mlx5e_priv    *epriv = mlx5i_epriv(netdev);
	struct mlx5_core_dev *mdev  = epriv->mdev;
	struct mlx5i_priv    *ipriv = epriv->ppriv;
	int err;

	mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
		      gid->raw);
	err = mlx5_core_attach_mcg(mdev, gid, ipriv->qpn);
	if (err)
		mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n",
			       ipriv->qpn, gid->raw);
	if (set_qkey) {
		mlx5_core_dbg(mdev, "%s setting qkey 0x%x\n",
			      netdev->name, qkey);
		ipriv->qkey = qkey;
	}

	return err;
}
static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca,
			      union ib_gid *gid, u16 lid)
{
	struct mlx5e_priv    *epriv = mlx5i_epriv(netdev);
	struct mlx5_core_dev *mdev  = epriv->mdev;
	struct mlx5i_priv    *ipriv = epriv->ppriv;
	int err;

	mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
		      gid->raw);

	err = mlx5_core_detach_mcg(mdev, gid, ipriv->qpn);
	if (err)
		mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n",
			      ipriv->qpn, gid->raw);

	return err;
}
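
/* Transmit one skb on the SQ selected by the skb queue mapping, using the
 * destination AH address vector, destination QPN and the IPoIB qkey.
 */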
static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
		      struct ib_ah *address, u32 dqpn)
{
	struct mlx5e_priv *epriv = mlx5i_epriv(dev);
	struct mlx5e_txqsq *sq   = epriv->txq2sq[skb_get_queue_mapping(skb)];
	struct mlx5_ib_ah *mah   = to_mah(address);
	struct mlx5i_priv *ipriv = epriv->ppriv;

	mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());

	return NETDEV_TX_OK;
}

static void mlx5i_set_pkey_index(struct net_device *netdev, int id)
{
	struct mlx5i_priv *ipriv = netdev_priv(netdev);

	ipriv->pkey_index = (u16)id;
}
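
/* Enhanced IPoIB offloads on an IB port are required for this driver */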
static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
		return -EOPNOTSUPP;

	if (!MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
		mlx5_core_warn(mdev, "IPoIB enhanced offloads are not supported\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
static void mlx5_rdma_netdev_free(struct net_device *netdev)
{
	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5i_priv *ipriv = priv->ppriv;
	const struct mlx5e_profile *profile = priv->profile;

	mlx5e_detach_netdev(priv);
	profile->cleanup(priv);

	if (!ipriv->sub_interface) {
		mlx5i_pkey_qpn_ht_cleanup(netdev);
		mlx5e_destroy_mdev_resources(mdev);
	}
}
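
/* A non-zero PD means the mdev resources were already created, i.e. this
 * netdevice is a child (pkey) sub-interface.
 */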
static bool mlx5_is_sub_interface(struct mlx5_core_dev *mdev)
{
	return mdev->mlx5e_res.hw_objs.pdn != 0;
}

static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev)
{
	if (mlx5_is_sub_interface(mdev))
		return mlx5i_pkey_get_profile();
	return &mlx5i_nic_profile;
}
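
/* rdma_netdev setup callback: initialize per-mdev resources (parent only),
 * bind the mlx5e priv to the chosen profile, attach the netdevice and wire
 * up the rdma_netdev function pointers used by the IPoIB ULP.
 */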
static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
			      struct net_device *netdev, void *param)
{
	struct mlx5_core_dev *mdev = (struct mlx5_core_dev *)param;
	const struct mlx5e_profile *prof = mlx5_get_profile(mdev);
	struct mlx5i_priv *ipriv;
	struct mlx5e_priv *epriv;
	struct rdma_netdev *rn;
	int err;

	ipriv = netdev_priv(netdev);
	epriv = mlx5i_epriv(netdev);

	ipriv->sub_interface = mlx5_is_sub_interface(mdev);
	if (!ipriv->sub_interface) {
		err = mlx5i_pkey_qpn_ht_init(netdev);
		if (err) {
			mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n");
			return err;
		}

		/* This should only be called once per mdev */
		err = mlx5e_create_mdev_resources(mdev);
		if (err)
			goto destroy_ht;
	}

	err = mlx5e_priv_init(epriv, prof, netdev, mdev);
	if (err)
		goto destroy_mdev_resources;

	epriv->profile = prof;
	epriv->ppriv = ipriv;

	prof->init(mdev, netdev);

	err = mlx5e_attach_netdev(epriv);
	if (err)
		goto detach_profile;
	netif_carrier_off(netdev);

	/* set rdma_netdev func pointers */
	rn = &ipriv->rn;
	rn->hca = ibdev;
	rn->send = mlx5i_xmit;
	rn->attach_mcast = mlx5i_attach_mcast;
	rn->detach_mcast = mlx5i_detach_mcast;
	rn->set_id = mlx5i_set_pkey_index;

	netdev->priv_destructor = mlx5_rdma_netdev_free;
	netdev->needs_free_netdev = 1;

	return 0;

detach_profile:
	prof->cleanup(epriv);
	if (ipriv->sub_interface)
		return err;
destroy_mdev_resources:
	mlx5e_destroy_mdev_resources(mdev);
destroy_ht:
	mlx5i_pkey_qpn_ht_cleanup(netdev);
	return err;
}
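
/* Report to the IB core the queue counts, private data size and the
 * rdma_netdev setup callback used to allocate an IPoIB netdevice.
 */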
int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
			    struct ib_device *device,
			    struct rdma_netdev_alloc_params *params)
{
	int nch;
	int rc;

	rc = mlx5i_check_required_hca_cap(mdev);
	if (rc)
		return rc;

	nch = mlx5e_get_max_num_channels(mdev);

	*params = (struct rdma_netdev_alloc_params){
		.sizeof_priv = sizeof(struct mlx5i_priv) +
			       sizeof(struct mlx5e_priv),
		.txqs = nch * MLX5_MAX_NUM_TC,
		.rxqs = nch,
		.param = mdev,
		.initialize_rdma_netdev = mlx5_rdma_setup_rn,
	};

	return 0;
}
EXPORT_SYMBOL(mlx5_rdma_rn_get_params);