/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
#include "mlx5_core.h"

/* intf dev list mutex */
static DEFINE_MUTEX(mlx5_intf_mutex);
static DEFINE_IDA(mlx5_adev_ida);
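
/* Each mlx5 interface (Ethernet, RDMA, vDPA and their representor/multiport
 * variants) is exposed as an auxiliary device on the auxiliary bus. The
 * is_*_supported() predicates below decide, per protocol, whether the
 * current device, kernel config and firmware capabilities allow that
 * interface to be created.
 */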
static bool is_eth_rep_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_ESWITCH))
                return false;

        if (!MLX5_ESWITCH_MANAGER(dev))
                return false;

        if (!is_mdev_switchdev_mode(dev))
                return false;

        return true;
}
static bool is_eth_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
                return false;

        if (is_eth_rep_supported(dev))
                return false;

        if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return false;

        if (!MLX5_CAP_GEN(dev, eth_net_offloads)) {
                mlx5_core_warn(dev, "Missing eth_net_offloads capability\n");
                return false;
        }

        if (!MLX5_CAP_GEN(dev, nic_flow_table)) {
                mlx5_core_warn(dev, "Missing nic_flow_table capability\n");
                return false;
        }

        if (!MLX5_CAP_ETH(dev, csum_cap)) {
                mlx5_core_warn(dev, "Missing csum_cap capability\n");
                return false;
        }

        if (!MLX5_CAP_ETH(dev, max_lso_cap)) {
                mlx5_core_warn(dev, "Missing max_lso_cap capability\n");
                return false;
        }

        if (!MLX5_CAP_ETH(dev, vlan_cap)) {
                mlx5_core_warn(dev, "Missing vlan_cap capability\n");
                return false;
        }

        if (!MLX5_CAP_ETH(dev, rss_ind_tbl_cap)) {
                mlx5_core_warn(dev, "Missing rss_ind_tbl_cap capability\n");
                return false;
        }

        if (MLX5_CAP_FLOWTABLE(dev,
                               flow_table_properties_nic_receive.max_ft_level) < 3) {
                mlx5_core_warn(dev, "max_ft_level < 3\n");
                return false;
        }

        /* The remaining capabilities are only warned about; their absence
         * does not prevent the Ethernet interface from loading.
         */
        if (!MLX5_CAP_ETH(dev, self_lb_en_modifiable))
                mlx5_core_warn(dev, "Self loop back prevention is not supported\n");

        if (!MLX5_CAP_GEN(dev, cq_moderation))
                mlx5_core_warn(dev, "CQ moderation is not supported\n");

        return true;
}
static bool is_vnet_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_VDPA_NET))
                return false;

        if (mlx5_core_is_pf(dev))
                return false;

        if (!(MLX5_CAP_GEN_64(dev, general_obj_types) &
              MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q))
                return false;

        if (!(MLX5_CAP_DEV_VDPA_EMULATION(dev, event_mode) &
              MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE))
                return false;

        if (!MLX5_CAP_DEV_VDPA_EMULATION(dev, eth_frame_offload_type))
                return false;

        return true;
}
static bool is_ib_rep_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
                return false;

        if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
                return false;

        if (!is_eth_rep_supported(dev))
                return false;

        if (!MLX5_ESWITCH_MANAGER(dev))
                return false;

        if (!is_mdev_switchdev_mode(dev))
                return false;

        if (mlx5_core_mp_enabled(dev))
                return false;

        return true;
}
static bool is_mp_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
                return false;

        if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
                return false;

        if (is_ib_rep_supported(dev))
                return false;

        if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return false;

        if (!mlx5_core_is_mp_slave(dev))
                return false;

        return true;
}
static bool is_ib_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
                return false;

        if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
                return false;

        if (is_ib_rep_supported(dev))
                return false;

        if (is_mp_supported(dev))
                return false;

        return true;
}
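
/* These values index mlx5_adev_devices[] below. mlx5_attach_device() and
 * add_drivers() walk that table in ascending order, while
 * mlx5_detach_device() and delete_drivers() walk it in descending order,
 * so an entry's position also fixes the relative create/destroy ordering
 * of the interfaces.
 */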
enum {
        MLX5_INTERFACE_PROTOCOL_ETH,
        MLX5_INTERFACE_PROTOCOL_ETH_REP,

        MLX5_INTERFACE_PROTOCOL_IB,
        MLX5_INTERFACE_PROTOCOL_IB_REP,
        MLX5_INTERFACE_PROTOCOL_MPIB,

        MLX5_INTERFACE_PROTOCOL_VNET,
};
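
/* Each entry names one auxiliary device. The auxiliary bus composes the
 * final device name as "<module>.<suffix>.<id>", so with adev->id taken
 * from priv->adev_idx, the Ethernet device of the first core device shows
 * up as "mlx5_core.eth.0".
 */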
static const struct mlx5_adev_device {
        const char *suffix;
        bool (*is_supported)(struct mlx5_core_dev *dev);
} mlx5_adev_devices[] = {
        [MLX5_INTERFACE_PROTOCOL_VNET] = { .suffix = "vnet",
                                           .is_supported = &is_vnet_supported },
        [MLX5_INTERFACE_PROTOCOL_IB] = { .suffix = "rdma",
                                         .is_supported = &is_ib_supported },
        [MLX5_INTERFACE_PROTOCOL_ETH] = { .suffix = "eth",
                                          .is_supported = &is_eth_supported },
        [MLX5_INTERFACE_PROTOCOL_ETH_REP] = { .suffix = "eth-rep",
                                              .is_supported = &is_eth_rep_supported },
        [MLX5_INTERFACE_PROTOCOL_IB_REP] = { .suffix = "rdma-rep",
                                             .is_supported = &is_ib_rep_supported },
        [MLX5_INTERFACE_PROTOCOL_MPIB] = { .suffix = "multiport",
                                           .is_supported = &is_mp_supported },
};
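
/* A consumer binds to one of these devices by matching the composed name
 * in its auxiliary_device_id table. Illustrative sketch of the consumer
 * side (mirrors what the mlx5e driver does; not part of this file):
 *
 *	static const struct auxiliary_device_id mlx5e_id_table[] = {
 *		{ .name = MLX5_ADEV_NAME ".eth" },
 *		{},
 *	};
 *	MODULE_DEVICE_TABLE(auxiliary, mlx5e_id_table);
 */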
int mlx5_adev_idx_alloc(void)
{
        return ida_alloc(&mlx5_adev_ida, GFP_KERNEL);
}

void mlx5_adev_idx_free(int idx)
{
        ida_free(&mlx5_adev_ida, idx);
}
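
/* priv->adev holds one slot per mlx5_adev_devices[] entry; a NULL slot
 * means the corresponding auxiliary device does not currently exist.
 */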
int mlx5_adev_init(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;

        priv->adev = kcalloc(ARRAY_SIZE(mlx5_adev_devices),
                             sizeof(struct mlx5_adev *), GFP_KERNEL);
        if (!priv->adev)
                return -ENOMEM;

        return 0;
}
void mlx5_adev_cleanup(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;

        kfree(priv->adev);
}
static void adev_release(struct device *dev)
{
        struct mlx5_adev *mlx5_adev =
                container_of(dev, struct mlx5_adev, adev.dev);
        struct mlx5_priv *priv = &mlx5_adev->mdev->priv;
        int idx = mlx5_adev->idx;

        kfree(mlx5_adev);
        priv->adev[idx] = NULL;
}
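
/* Allocate and publish one auxiliary device. Once auxiliary_device_init()
 * has succeeded, the memory is owned by the driver core and is released
 * through adev_release() when the last reference is dropped; that is why
 * the two error paths below differ.
 */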
static struct mlx5_adev *add_adev(struct mlx5_core_dev *dev, int idx)
{
        const char *suffix = mlx5_adev_devices[idx].suffix;
        struct auxiliary_device *adev;
        struct mlx5_adev *madev;
        int ret;

        madev = kzalloc(sizeof(*madev), GFP_KERNEL);
        if (!madev)
                return ERR_PTR(-ENOMEM);

        adev = &madev->adev;
        adev->id = dev->priv.adev_idx;
        adev->name = suffix;
        adev->dev.parent = dev->device;
        adev->dev.release = adev_release;
        madev->mdev = dev;
        madev->idx = idx;

        ret = auxiliary_device_init(adev);
        if (ret) {
                kfree(madev);
                return ERR_PTR(ret);
        }

        ret = auxiliary_device_add(adev);
        if (ret) {
                auxiliary_device_uninit(adev);
                return ERR_PTR(ret);
        }
        return madev;
}
static void del_adev(struct auxiliary_device *adev)
{
        auxiliary_device_delete(adev);
        auxiliary_device_uninit(adev);
}
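
/* Create every supported device that is missing and resume() every device
 * that already exists. Together with mlx5_detach_device() this lets the
 * core suspend and revive its consumers without destroying them. The
 * resume()/suspend() callbacks used here are the auxiliary driver ops;
 * sketch of the consumer side (mirrors mlx5e, callback names illustrative):
 *
 *	static struct auxiliary_driver mlx5e_driver = {
 *		.name = "eth",
 *		.probe = mlx5e_probe,
 *		.remove = mlx5e_remove,
 *		.suspend = mlx5e_suspend,
 *		.resume = mlx5e_resume,
 *		.id_table = mlx5e_id_table,
 *	};
 */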
int mlx5_attach_device(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct auxiliary_device *adev;
        struct auxiliary_driver *adrv;
        int ret = 0, i;

        mutex_lock(&mlx5_intf_mutex);
        for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
                if (!priv->adev[i]) {
                        bool is_supported = false;

                        if (mlx5_adev_devices[i].is_supported)
                                is_supported = mlx5_adev_devices[i].is_supported(dev);

                        if (!is_supported)
                                continue;

                        priv->adev[i] = add_adev(dev, i);
                        if (IS_ERR(priv->adev[i])) {
                                ret = PTR_ERR(priv->adev[i]);
                                priv->adev[i] = NULL;
                        }
                } else {
                        adev = &priv->adev[i]->adev;
                        adrv = to_auxiliary_drv(adev->dev.driver);

                        if (adrv->resume)
                                ret = adrv->resume(adev);
                }
                if (ret) {
                        mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
                                       i, mlx5_adev_devices[i].suffix);
                        break;
                }
        }
        mutex_unlock(&mlx5_intf_mutex);
        return ret;
}
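
/* Inverse of mlx5_attach_device(): devices whose driver implements
 * suspend() are suspended in place and kept, all others are deleted.
 */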
void mlx5_detach_device(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct auxiliary_device *adev;
        struct auxiliary_driver *adrv;
        pm_message_t pm = {};
        int i;

        mutex_lock(&mlx5_intf_mutex);
        for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
                if (!priv->adev[i])
                        continue;

                adev = &priv->adev[i]->adev;
                adrv = to_auxiliary_drv(adev->dev.driver);

                if (adrv->suspend) {
                        adrv->suspend(adev, pm);
                        continue;
                }

                del_adev(&priv->adev[i]->adev);
                priv->adev[i] = NULL;
        }
        mutex_unlock(&mlx5_intf_mutex);
}
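
/* Registration toggles MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV and lets the
 * rescan machinery below create or delete auxiliary devices to match.
 */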
int mlx5_register_device(struct mlx5_core_dev *dev)
{
        int ret;

        mutex_lock(&mlx5_intf_mutex);
        dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
        ret = mlx5_rescan_drivers_locked(dev);
        mutex_unlock(&mlx5_intf_mutex);
        if (ret)
                mlx5_unregister_device(dev);

        return ret;
}
void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
        mutex_lock(&mlx5_intf_mutex);
        dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
        mlx5_rescan_drivers_locked(dev);
        mutex_unlock(&mlx5_intf_mutex);
}
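
/* add_drivers()/delete_drivers() implement the rescan: create every
 * supported-but-missing device, and delete every device that is present
 * but no longer supported (or all of them when
 * MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV is set).
 */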
static int add_drivers(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        int i, ret = 0;

        for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
                bool is_supported = false;

                if (priv->adev[i])
                        continue;

                if (mlx5_adev_devices[i].is_supported)
                        is_supported = mlx5_adev_devices[i].is_supported(dev);

                if (!is_supported)
                        continue;

                priv->adev[i] = add_adev(dev, i);
                if (IS_ERR(priv->adev[i])) {
                        mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
                                       i, mlx5_adev_devices[i].suffix);
                        /* We continue to rescan drivers and leave it to the
                         * caller to decide whether to release everything or
                         * continue.
                         */
                        ret = PTR_ERR(priv->adev[i]);
                        priv->adev[i] = NULL;
                }
        }
        return ret;
}
static void delete_drivers(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        bool delete_all;
        int i;

        delete_all = priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;

        for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
                bool is_supported = false;

                if (!priv->adev[i])
                        continue;

                if (mlx5_adev_devices[i].is_supported && !delete_all)
                        is_supported = mlx5_adev_devices[i].is_supported(dev);

                if (is_supported)
                        continue;

                del_adev(&priv->adev[i]->adev);
                priv->adev[i] = NULL;
        }
}
/* This function is used after mlx5_core_dev is reconfigured: it deletes the
 * auxiliary devices that are no longer supported and adds those that are.
 */
int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;

        lockdep_assert_held(&mlx5_intf_mutex);

        delete_drivers(dev);
        if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
                return 0;

        return add_drivers(dev);
}
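
/* The generated id packs PCI domain, bus and slot while masking out the
 * function number, so the PFs of a dual-port device (same slot, different
 * functions) compare equal; next_phys_dev() relies on this to find the
 * sibling port.
 */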
static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
{
        return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
                     (dev->pdev->bus->number << 8) |
                     PCI_SLOT(dev->pdev->devfn));
}
static int next_phys_dev(struct device *dev, const void *data)
{
        struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
        struct mlx5_core_dev *mdev = madev->mdev;
        const struct mlx5_core_dev *curr = data;

        if (!mlx5_core_is_pf(mdev))
                return 0;

        if (mdev == curr)
                return 0;

        if (mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr))
                return 0;

        return 1;
}
/* This function is called from two flows:
 * 1. During initialization of mlx5_core_dev, where no locking is needed.
 * 2. During the LAG configure stage, where the caller holds &mlx5_intf_mutex.
 */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
        struct auxiliary_device *adev;
        struct mlx5_adev *madev;

        if (!mlx5_core_is_pf(dev))
                return NULL;

        adev = auxiliary_find_device(NULL, dev, &next_phys_dev);
        if (!adev)
                return NULL;

        madev = container_of(adev, struct mlx5_adev, adev);
        put_device(&adev->dev);
        return madev->mdev;
}
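
/* Wrappers around the interface list lock so that other mlx5_core files
 * (e.g. the LAG code) can serialize against auxiliary device add/remove
 * without touching the mutex directly.
 */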
void mlx5_dev_list_lock(void)
{
        mutex_lock(&mlx5_intf_mutex);
}

void mlx5_dev_list_unlock(void)
{
        mutex_unlock(&mlx5_intf_mutex);
}

int mlx5_dev_list_trylock(void)
{
        return mutex_trylock(&mlx5_intf_mutex);
}