/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
static LIST_HEAD(intf_list);
static LIST_HEAD(mlx5_dev_list);
/* intf dev list mutex */
static DEFINE_MUTEX(mlx5_intf_mutex);
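
/* Per-(device, interface) bookkeeping: one context is allocated for each
 * registered interface on each registered core device and linked into
 * priv->ctx_list.
 */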
struct mlx5_device_context {
	struct list_head list;
	struct mlx5_interface *intf;
	void *context;
	unsigned long state;
};

struct mlx5_delayed_event {
	struct list_head list;
	struct mlx5_core_dev *dev;
	enum mlx5_dev_event event;
	unsigned long param;
};
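
/* dev_ctx->state bits: ADDED is set once intf->add() has returned a
 * context; ATTACHED tracks the attach/detach cycle for interfaces that
 * provide the attach/detach callbacks.
 */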
enum {
	MLX5_INTERFACE_ADDED,
	MLX5_INTERFACE_ATTACHED,
};

static void add_delayed_event(struct mlx5_priv *priv,
			      struct mlx5_core_dev *dev,
			      enum mlx5_dev_event event,
			      unsigned long param)
{
	struct mlx5_delayed_event *delayed_event;

	delayed_event = kzalloc(sizeof(*delayed_event), GFP_ATOMIC);
	if (!delayed_event) {
		mlx5_core_err(dev, "event %d is missed\n", event);
		return;
	}

	mlx5_core_dbg(dev, "Accumulating event %d\n", event);
	delayed_event->dev = dev;
	delayed_event->event = event;
	delayed_event->param = param;
	list_add_tail(&delayed_event->list, &priv->waiting_events_list);
}
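
/* Stop accumulating and flush: replay the events queued while
 * is_accum_events was set to the just-added interface, then free them.
 * The replay is skipped if intf->add() failed (dev_ctx->context == NULL).
 */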
static void delayed_event_release(struct mlx5_device_context *dev_ctx,
				  struct mlx5_priv *priv)
{
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
	struct mlx5_delayed_event *de;
	struct mlx5_delayed_event *n;
	struct list_head temp;

	INIT_LIST_HEAD(&temp);

	spin_lock_irq(&priv->ctx_lock);

	priv->is_accum_events = false;
	list_splice_init(&priv->waiting_events_list, &temp);
	if (!dev_ctx->context)
		goto out;
	list_for_each_entry_safe(de, n, &temp, list)
		dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);

out:
	spin_unlock_irq(&priv->ctx_lock);

	list_for_each_entry_safe(de, n, &temp, list) {
		list_del(&de->list);
		kfree(de);
	}
}

/* Accumulate events that can arrive after mlx5_ib has called
 * ib_register_device but before the interface is added to the events
 * list.
 */
static void delayed_event_start(struct mlx5_priv *priv)
{
	spin_lock_irq(&priv->ctx_lock);
	priv->is_accum_events = true;
	spin_unlock_irq(&priv->ctx_lock);
}
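
/* Instantiate an interface on a core device: call intf->add() and, on
 * success, publish the new context on priv->ctx_list so it starts
 * receiving events. Events raised while add() was running are replayed
 * by delayed_event_release().
 */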
void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	if (!mlx5_lag_intf_add(intf, priv))
		return;

	dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
	if (!dev_ctx)
		return;

	dev_ctx->intf = intf;
	delayed_event_start(priv);

	dev_ctx->context = intf->add(dev);
	if (dev_ctx->context) {
		set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
		if (intf->attach)
			set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);

		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
		if (dev_ctx->intf->pfault) {
			if (priv->pfault) {
				mlx5_core_err(dev, "multiple page fault handlers not supported");
			} else {
				priv->pfault_ctx = dev_ctx->context;
				priv->pfault = dev_ctx->intf->pfault;
			}
		}
#endif
		spin_unlock_irq(&priv->ctx_lock);
	}

	delayed_event_release(dev_ctx, priv);

	if (!dev_ctx->context)
		kfree(dev_ctx);
}

static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
						   struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf)
			return dev_ctx;

	return NULL;
}

void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = mlx5_get_device(intf, priv);
	if (!dev_ctx)
		return;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	spin_lock_irq(&priv->ctx_lock);
	if (priv->pfault == dev_ctx->intf->pfault)
		priv->pfault = NULL;
	spin_unlock_irq(&priv->ctx_lock);

	/* wait for any in-flight page-fault handler before the context goes away */
	synchronize_srcu(&priv->pfault_srcu);
#endif

	spin_lock_irq(&priv->ctx_lock);
	list_del(&dev_ctx->list);
	spin_unlock_irq(&priv->ctx_lock);

	if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
		intf->remove(dev, dev_ctx->context);

	kfree(dev_ctx);
}
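
/* Re-activate an interface after a detach (e.g. across a reset or
 * suspend cycle): prefer the lightweight attach() callback when the
 * interface provides one, otherwise fall back to a full add().
 */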
static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = mlx5_get_device(intf, priv);
	if (!dev_ctx)
		return;

	delayed_event_start(priv);
	if (intf->attach) {
		if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
			goto out;
		if (intf->attach(dev, dev_ctx->context))
			goto out;
		set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
	} else {
		if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
			goto out;
		dev_ctx->context = intf->add(dev);
		if (!dev_ctx->context)
			goto out;
		set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
	}

out:
	delayed_event_release(dev_ctx, priv);
}

void mlx5_attach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_attach_interface(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);
}
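
/* Mirror image of mlx5_attach_interface(): use detach() when available
 * so the interface can be cheaply re-attached later, otherwise remove it
 * entirely. The dev_ctx stays on priv->ctx_list either way.
 */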
static void mlx5_detach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = mlx5_get_device(intf, priv);
	if (!dev_ctx)
		return;

	if (intf->detach) {
		if (!test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
			return;
		intf->detach(dev, dev_ctx->context);
		clear_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
	} else {
		if (!test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
			return;
		intf->remove(dev, dev_ctx->context);
		clear_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
	}
}

void mlx5_detach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_detach_interface(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);
}

bool mlx5_device_registered(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv;
	bool found = false;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
		if (priv == &dev->priv)
			found = true;
	mutex_unlock(&mlx5_intf_mutex);

	return found;
}

int mlx5_register_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&mlx5_intf_mutex);
	list_add_tail(&priv->dev_list, &mlx5_dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);

	return 0;
}

void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_remove_device(intf, priv);
	list_del(&priv->dev_list);
	mutex_unlock(&mlx5_intf_mutex);
}

int mlx5_register_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&mlx5_intf_mutex);
	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);

	return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
		mlx5_remove_device(intf, priv);
	list_del(&intf->list);
	mutex_unlock(&mlx5_intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);
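
/* Tear down and rebuild the interface serving the given protocol on one
 * device, e.g. to apply a configuration change that cannot be made on a
 * live instance.
 */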
void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol)
{
	mutex_lock(&mlx5_intf_mutex);
	mlx5_remove_dev_by_protocol(mdev, protocol);
	mlx5_add_dev_by_protocol(mdev, protocol);
	mutex_unlock(&mlx5_intf_mutex);
}

void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
	struct mlx5_priv *priv = &mdev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
		if ((dev_ctx->intf->protocol == protocol) &&
		    dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev_ctx->context);
			break;
		}

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);

/* Must be called with intf_mutex held */
void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
{
	struct mlx5_interface *intf;

	list_for_each_entry(intf, &intf_list, list)
		if (intf->protocol == protocol) {
			mlx5_add_device(intf, &dev->priv);
			break;
		}
}

/* Must be called with intf_mutex held */
void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
{
	struct mlx5_interface *intf;

	list_for_each_entry(intf, &intf_list, list)
		if (intf->protocol == protocol) {
			mlx5_remove_device(intf, &dev->priv);
			break;
		}
}
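
/* Pack PCI domain, bus and slot into one u32. The function number is
 * deliberately left out, so the two ports (PFs) of a dual-port adapter
 * map to the same ID.
 */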
static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
{
	return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
		     (dev->pdev->bus->number << 8) |
		     PCI_SLOT(dev->pdev->devfn));
}
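
/* Find another registered mlx5_core_dev sharing this device's PCI slot,
 * i.e. the sibling function of the same physical adapter; used by the
 * LAG code to pair the two ports.
 */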
/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
	u32 pci_id = mlx5_gen_pci_id(dev);
	struct mlx5_core_dev *res = NULL;
	struct mlx5_core_dev *tmp_dev;
	struct mlx5_priv *priv;

	list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
		tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
		if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
			res = tmp_dev;
			break;
		}
	}

	return res;
}
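
/* Fan a core event out to every interface that is currently added or
 * attached on this device; while an add is in progress the event is also
 * queued for later replay (see delayed_event_start()).
 */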
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
		     unsigned long param)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	if (priv->is_accum_events)
		add_delayed_event(priv, dev, event, param);

	/* After mlx5_detach_device, the dev_ctx->intf is still set and dev_ctx is
	 * still in priv->ctx_list. In this case, only notify the dev_ctx if its
	 * ADDED or ATTACHED bit is set.
	 */
	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event &&
		    (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) ||
		     test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)))
			dev_ctx->intf->event(dev, dev_ctx->context, event, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}
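
/* On-demand-paging support: dispatch a page fault to the single
 * registered handler. SRCU rather than a spinlock protects the handler
 * because it may sleep while resolving the fault.
 */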
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
			  struct mlx5_pagefault *pfault)
{
	struct mlx5_priv *priv = &dev->priv;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&priv->pfault_srcu);
	if (priv->pfault)
		priv->pfault(dev, priv->pfault_ctx, pfault);
	srcu_read_unlock(&priv->pfault_srcu, srcu_idx);
}
#endif
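
/* Expose the interface/device list lock to other parts of the driver
 * that need to serialize against interface add/remove.
 */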
void mlx5_dev_list_lock(void)
{
	mutex_lock(&mlx5_intf_mutex);
}

void mlx5_dev_list_unlock(void)
{
	mutex_unlock(&mlx5_intf_mutex);
}

int mlx5_dev_list_trylock(void)
{
	return mutex_trylock(&mlx5_intf_mutex);
}