drivers/net/ethernet/mellanox/mlx5/core/dev.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "devlink.h"
#include "lag/lag.h"

static DEFINE_IDA(mlx5_adev_ida);

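/* An Ethernet representor auxiliary device is only exposed when eswitch
 * support is built in, the device is the eswitch manager and the eswitch
 * is in switchdev mode.
 */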
static bool is_eth_rep_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_ESWITCH))
                return false;

        if (!MLX5_ESWITCH_MANAGER(dev))
                return false;

        if (!is_mdev_switchdev_mode(dev))
                return false;

        return true;
}

bool mlx5_eth_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
                return false;

        if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return false;

        if (!MLX5_CAP_GEN(dev, eth_net_offloads)) {
                mlx5_core_warn(dev, "Missing eth_net_offloads capability\n");
                return false;
        }

        if (!MLX5_CAP_GEN(dev, nic_flow_table)) {
                mlx5_core_warn(dev, "Missing nic_flow_table capability\n");
                return false;
        }

        if (!MLX5_CAP_ETH(dev, csum_cap)) {
                mlx5_core_warn(dev, "Missing csum_cap capability\n");
                return false;
        }

        if (!MLX5_CAP_ETH(dev, max_lso_cap)) {
                mlx5_core_warn(dev, "Missing max_lso_cap capability\n");
                return false;
        }

        if (!MLX5_CAP_ETH(dev, vlan_cap)) {
                mlx5_core_warn(dev, "Missing vlan_cap capability\n");
                return false;
        }

        if (!MLX5_CAP_ETH(dev, rss_ind_tbl_cap)) {
                mlx5_core_warn(dev, "Missing rss_ind_tbl_cap capability\n");
                return false;
        }

        if (MLX5_CAP_FLOWTABLE(dev,
                               flow_table_properties_nic_receive.max_ft_level) < 3) {
                mlx5_core_warn(dev, "max_ft_level < 3\n");
                return false;
        }

        if (!MLX5_CAP_ETH(dev, self_lb_en_modifiable))
                mlx5_core_warn(dev, "Self loop back prevention is not supported\n");
        if (!MLX5_CAP_GEN(dev, cq_moderation))
                mlx5_core_warn(dev, "CQ moderation is not supported\n");

        return true;
}

bool mlx5_vnet_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_VDPA_NET))
                return false;

        if (mlx5_core_is_pf(dev))
                return false;

        if (!(MLX5_CAP_GEN_64(dev, general_obj_types) &
              MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q))
                return false;

        if (!(MLX5_CAP_DEV_VDPA_EMULATION(dev, event_mode) &
              MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE))
                return false;

        if (!MLX5_CAP_DEV_VDPA_EMULATION(dev, eth_frame_offload_type))
                return false;

        return true;
}

static bool is_vnet_enabled(struct mlx5_core_dev *dev)
{
        union devlink_param_value val;
        int err;

        err = devl_param_driverinit_value_get(priv_to_devlink(dev),
                                              DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
                                              &val);
        return err ? false : val.vbool;
}

static bool is_ib_rep_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
                return false;

        if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
                return false;

        if (!is_eth_rep_supported(dev))
                return false;

        if (mlx5_core_mp_enabled(dev))
                return false;

        return true;
}

static bool is_mp_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
                return false;

        if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
                return false;

        if (is_ib_rep_supported(dev))
                return false;

        if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return false;

        if (!mlx5_core_is_mp_slave(dev))
                return false;

        return true;
}

bool mlx5_rdma_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
                return false;

        if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
                return false;

        if (is_ib_rep_supported(dev))
                return false;

        if (is_mp_supported(dev))
                return false;

        return true;
}

static bool is_ib_enabled(struct mlx5_core_dev *dev)
{
        union devlink_param_value val;
        int err;

        err = devl_param_driverinit_value_get(priv_to_devlink(dev),
                                              DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
                                              &val);
        return err ? false : val.vbool;
}

static bool is_dpll_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_DPLL))
                return false;

        if (!MLX5_CAP_MCAM_REG2(dev, synce_registers)) {
                mlx5_core_dbg(dev, "Missing SyncE capability\n");
                return false;
        }

        return true;
}

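/* Protocol indexes into mlx5_adev_devices[] below. Attach walks the table
 * in ascending order; detach and delete walk it in reverse.
 */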
enum {
        MLX5_INTERFACE_PROTOCOL_ETH,
        MLX5_INTERFACE_PROTOCOL_ETH_REP,

        MLX5_INTERFACE_PROTOCOL_IB,
        MLX5_INTERFACE_PROTOCOL_IB_REP,
        MLX5_INTERFACE_PROTOCOL_MPIB,

        MLX5_INTERFACE_PROTOCOL_VNET,

        MLX5_INTERFACE_PROTOCOL_DPLL,
};

static const struct mlx5_adev_device {
        const char *suffix;
        bool (*is_supported)(struct mlx5_core_dev *dev);
        bool (*is_enabled)(struct mlx5_core_dev *dev);
} mlx5_adev_devices[] = {
        [MLX5_INTERFACE_PROTOCOL_VNET] = { .suffix = "vnet",
                                           .is_supported = &mlx5_vnet_supported,
                                           .is_enabled = &is_vnet_enabled },
        [MLX5_INTERFACE_PROTOCOL_IB] = { .suffix = "rdma",
                                         .is_supported = &mlx5_rdma_supported,
                                         .is_enabled = &is_ib_enabled },
        [MLX5_INTERFACE_PROTOCOL_ETH] = { .suffix = "eth",
                                          .is_supported = &mlx5_eth_supported,
                                          .is_enabled = &mlx5_core_is_eth_enabled },
        [MLX5_INTERFACE_PROTOCOL_ETH_REP] = { .suffix = "eth-rep",
                                           .is_supported = &is_eth_rep_supported },
        [MLX5_INTERFACE_PROTOCOL_IB_REP] = { .suffix = "rdma-rep",
                                           .is_supported = &is_ib_rep_supported },
        [MLX5_INTERFACE_PROTOCOL_MPIB] = { .suffix = "multiport",
                                           .is_supported = &is_mp_supported },
        [MLX5_INTERFACE_PROTOCOL_DPLL] = { .suffix = "dpll",
                                           .is_supported = &is_dpll_supported },
};

int mlx5_adev_idx_alloc(void)
{
        return ida_alloc(&mlx5_adev_ida, GFP_KERNEL);
}

void mlx5_adev_idx_free(int idx)
{
        ida_free(&mlx5_adev_ida, idx);
}

int mlx5_adev_init(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;

        priv->adev = kcalloc(ARRAY_SIZE(mlx5_adev_devices),
                             sizeof(struct mlx5_adev *), GFP_KERNEL);
        if (!priv->adev)
                return -ENOMEM;

        return 0;
}

void mlx5_adev_cleanup(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;

        kfree(priv->adev);
}

static void adev_release(struct device *dev)
{
        struct mlx5_adev *mlx5_adev =
                container_of(dev, struct mlx5_adev, adev.dev);
        struct mlx5_priv *priv = &mlx5_adev->mdev->priv;
        int idx = mlx5_adev->idx;

        kfree(mlx5_adev);
        priv->adev[idx] = NULL;
}

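/* Allocate, initialize and add the auxiliary device backing protocol table
 * entry @idx; its release callback (adev_release) frees the mlx5_adev and
 * clears the priv->adev[] slot.
 */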
static struct mlx5_adev *add_adev(struct mlx5_core_dev *dev, int idx)
{
        const char *suffix = mlx5_adev_devices[idx].suffix;
        struct auxiliary_device *adev;
        struct mlx5_adev *madev;
        int ret;

        madev = kzalloc(sizeof(*madev), GFP_KERNEL);
        if (!madev)
                return ERR_PTR(-ENOMEM);

        adev = &madev->adev;
        adev->id = dev->priv.adev_idx;
        adev->name = suffix;
        adev->dev.parent = dev->device;
        adev->dev.release = adev_release;
        madev->mdev = dev;
        madev->idx = idx;

        ret = auxiliary_device_init(adev);
        if (ret) {
                kfree(madev);
                return ERR_PTR(ret);
        }

        ret = auxiliary_device_add(adev);
        if (ret) {
                auxiliary_device_uninit(adev);
                return ERR_PTR(ret);
        }
        return madev;
}

static void del_adev(struct auxiliary_device *adev)
{
        auxiliary_device_delete(adev);
        auxiliary_device_uninit(adev);
}

void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev)
{
        mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
        dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
        mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
}

bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev)
{
        return dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
}

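/* Create the auxiliary devices for every supported and enabled protocol,
 * or resume their drivers if the devices already exist. Called with the
 * devlink instance lock held.
 */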
int mlx5_attach_device(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct auxiliary_device *adev;
        struct auxiliary_driver *adrv;
        int ret = 0, i;

        devl_assert_locked(priv_to_devlink(dev));
        mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
        priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
        for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
                if (!priv->adev[i]) {
                        bool is_supported = false;

                        if (mlx5_adev_devices[i].is_enabled) {
                                bool enabled;

                                enabled = mlx5_adev_devices[i].is_enabled(dev);
                                if (!enabled)
                                        continue;
                        }

                        if (mlx5_adev_devices[i].is_supported)
                                is_supported = mlx5_adev_devices[i].is_supported(dev);

                        if (!is_supported)
                                continue;

                        priv->adev[i] = add_adev(dev, i);
                        if (IS_ERR(priv->adev[i])) {
                                ret = PTR_ERR(priv->adev[i]);
                                priv->adev[i] = NULL;
                        }
                } else {
                        adev = &priv->adev[i]->adev;

                        /* Note that this is the auxiliary driver bound to the
                         * auxiliary device, not the PCI driver that
                         * mlx5_core_dev is bound to.
                         */
                        if (!adev->dev.driver)
                                continue;
                        adrv = to_auxiliary_drv(adev->dev.driver);

                        if (adrv->resume)
                                ret = adrv->resume(adev);
                }
                if (ret) {
                        mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
                                       i, mlx5_adev_devices[i].suffix);

                        break;
                }
        }
        mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
        return ret;
}

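/* Suspend the auxiliary drivers (when requested and supported) or delete
 * the auxiliary devices, walking the protocol table in reverse order.
 * Called with the devlink instance lock held.
 */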
void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend)
{
        struct mlx5_priv *priv = &dev->priv;
        struct auxiliary_device *adev;
        struct auxiliary_driver *adrv;
        pm_message_t pm = {};
        int i;

        devl_assert_locked(priv_to_devlink(dev));
        mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
        for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
                if (!priv->adev[i])
                        continue;

                if (mlx5_adev_devices[i].is_enabled) {
                        bool enabled;

                        enabled = mlx5_adev_devices[i].is_enabled(dev);
                        if (!enabled)
                                goto skip_suspend;
                }

                adev = &priv->adev[i]->adev;
                /* The auxiliary driver was unbound manually through sysfs */
                if (!adev->dev.driver)
                        goto skip_suspend;

                adrv = to_auxiliary_drv(adev->dev.driver);

                if (adrv->suspend && suspend) {
                        adrv->suspend(adev, pm);
                        continue;
                }

skip_suspend:
                del_adev(&priv->adev[i]->adev);
                priv->adev[i] = NULL;
        }
        priv->flags |= MLX5_PRIV_FLAGS_DETACH;
        mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
}

int mlx5_register_device(struct mlx5_core_dev *dev)
{
        int ret;

        devl_assert_locked(priv_to_devlink(dev));
        mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
        dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
        ret = mlx5_rescan_drivers_locked(dev);
        mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
        if (ret)
                mlx5_unregister_device(dev);

        return ret;
}

void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
        devl_assert_locked(priv_to_devlink(dev));
        mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
        dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
        mlx5_rescan_drivers_locked(dev);
        mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
}

static int add_drivers(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        int i, ret = 0;

        for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
                bool is_supported = false;

                if (priv->adev[i])
                        continue;

                if (mlx5_adev_devices[i].is_enabled &&
                    !(mlx5_adev_devices[i].is_enabled(dev)))
                        continue;

                if (mlx5_adev_devices[i].is_supported)
                        is_supported = mlx5_adev_devices[i].is_supported(dev);

                if (!is_supported)
                        continue;

                priv->adev[i] = add_adev(dev, i);
                if (IS_ERR(priv->adev[i])) {
                        mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
                                       i, mlx5_adev_devices[i].suffix);
                        /* We continue to rescan drivers and leave it to the
                         * caller to decide whether to release everything or
                         * continue.
                         */
                        ret = PTR_ERR(priv->adev[i]);
                        priv->adev[i] = NULL;
                }
        }
        return ret;
}

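/* Delete auxiliary devices whose protocol is no longer enabled or supported;
 * delete all of them when MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV is set.
 */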
static void delete_drivers(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        bool delete_all;
        int i;

        delete_all = priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;

        for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
                bool is_supported = false;

                if (!priv->adev[i])
                        continue;

                if (mlx5_adev_devices[i].is_enabled) {
                        bool enabled;

                        enabled = mlx5_adev_devices[i].is_enabled(dev);
                        if (!enabled)
                                goto del_adev;
                }

                if (mlx5_adev_devices[i].is_supported && !delete_all)
                        is_supported = mlx5_adev_devices[i].is_supported(dev);

                if (is_supported)
                        continue;

del_adev:
                del_adev(&priv->adev[i]->adev);
                priv->adev[i] = NULL;
        }
}

/* This function is used after mlx5_core_dev is reconfigured.
 */
int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;

        if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
                return 0;

        delete_drivers(dev);
        if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
                return 0;

        return add_drivers(dev);
}

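/* Two core devices belong to the same physical HW when both report the
 * same non-zero system image GUID.
 */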
bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
{
        u64 fsystem_guid, psystem_guid;

        fsystem_guid = mlx5_query_nic_system_image_guid(dev);
        psystem_guid = mlx5_query_nic_system_image_guid(peer_dev);

        return (fsystem_guid && psystem_guid && fsystem_guid == psystem_guid);
}