/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <asm/byteorder.h>
#include <linux/bitmap.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"

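/* Register an upper-layer driver (e.g. the RoCE driver) in the ULP
 * table and publish its ops via RCU.  For the RoCE ULP this also checks
 * that enough stat contexts are available and re-issues the VNIC 0
 * configuration if the NIC is already open.
 */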
static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
			     struct bnxt_ulp_ops *ulp_ops, void *handle)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;

	ASSERT_RTNL();
	if (ulp_id >= BNXT_MAX_ULP)
		return -EINVAL;

	ulp = &edev->ulp_tbl[ulp_id];
	if (rcu_access_pointer(ulp->ulp_ops)) {
		netdev_err(bp->dev, "ulp id %d already registered\n", ulp_id);
		return -EBUSY;
	}
	if (ulp_id == BNXT_ROCE_ULP) {
		unsigned int max_stat_ctxs;

		max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
		if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
		    bp->cp_nr_rings == max_stat_ctxs)
			return -ENOMEM;
	}

	atomic_set(&ulp->ref_count, 0);
	ulp->handle = handle;
	rcu_assign_pointer(ulp->ulp_ops, ulp_ops);

	if (ulp_id == BNXT_ROCE_ULP) {
		if (test_bit(BNXT_STATE_OPEN, &bp->state))
			bnxt_hwrm_vnic_cfg(bp, 0);
	}

	return 0;
}

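/* Tear down a ULP registration: free any MSI-X vectors the ULP holds,
 * deregister its async events with the firmware, clear the ops pointer
 * and wait (bounded to ~1s) for outstanding references to drain.
 */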
static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;
	int i = 0;

	ASSERT_RTNL();
	if (ulp_id >= BNXT_MAX_ULP)
		return -EINVAL;

	ulp = &edev->ulp_tbl[ulp_id];
	if (!rcu_access_pointer(ulp->ulp_ops)) {
		netdev_err(bp->dev, "ulp id %d not registered\n", ulp_id);
		return -EINVAL;
	}
	if (ulp_id == BNXT_ROCE_ULP && ulp->msix_requested)
		edev->en_ops->bnxt_free_msix(edev, ulp_id);

	if (ulp->max_async_event_id)
		bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);

	RCU_INIT_POINTER(ulp->ulp_ops, NULL);
	synchronize_rcu();
	ulp->max_async_event_id = 0;
	ulp->async_events_bmap = NULL;
	while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
		msleep(100);
		i++;
	}
	return 0;
}

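/* Fill the caller-supplied array with the MSI-X vector, ring index and
 * doorbell offset (0x80 per ring) of each vector assigned to the RoCE
 * ULP.
 */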
static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
{
	struct bnxt_en_dev *edev = bp->edev;
	int num_msix, idx, i;

	num_msix = edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
	idx = edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
	for (i = 0; i < num_msix; i++) {
		ent[i].vector = bp->irq_tbl[idx + i].vector;
		ent[i].ring_idx = idx + i;
		ent[i].db_offset = (idx + i) * 0x80;
	}
}

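/* Reserve MSI-X vectors for the RoCE ULP.  Under the new resource
 * manager (BNXT_NEW_RM) the ULP vectors start above the L2 completion
 * rings; otherwise they are carved from the top of the usable range.
 * The NIC may be closed and reopened (or rings re-reserved) to grow
 * the IRQ allocation.  Returns the number of vectors granted or an
 * errno.
 */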
static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
			      struct bnxt_msix_entry *ent, int num_msix)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	int max_idx, max_cp_rings;
	int avail_msix, idx;
	int rc = 0;

	ASSERT_RTNL();
	if (ulp_id != BNXT_ROCE_ULP)
		return -EINVAL;

	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
		return -ENODEV;

	if (edev->ulp_tbl[ulp_id].msix_requested)
		return -EAGAIN;

	max_cp_rings = bnxt_get_max_func_cp_rings(bp);
	avail_msix = bnxt_get_avail_msix(bp, num_msix);
	if (!avail_msix)
		return -ENOMEM;
	if (avail_msix > num_msix)
		avail_msix = num_msix;

	if (BNXT_NEW_RM(bp)) {
		idx = bp->cp_nr_rings;
	} else {
		max_idx = min_t(int, bp->total_irqs, max_cp_rings);
		idx = max_idx - avail_msix;
	}
	edev->ulp_tbl[ulp_id].msix_base = idx;
	edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
	if (bp->total_irqs < (idx + avail_msix)) {
		if (netif_running(dev)) {
			bnxt_close_nic(bp, true, false);
			rc = bnxt_open_nic(bp, true, false);
		} else {
			rc = bnxt_reserve_rings(bp, true);
		}
	}
	if (rc) {
		edev->ulp_tbl[ulp_id].msix_requested = 0;
		return -EAGAIN;
	}

	if (BNXT_NEW_RM(bp)) {
		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;

		avail_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
		edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
	}
	bnxt_fill_msix_vecs(bp, ent);
	edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
	return avail_msix;
}

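/* Release the RoCE ULP's MSI-X vectors and, if the NIC is running,
 * close and reopen it so the rings and IRQs are re-reserved for L2.
 */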
static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);

	ASSERT_RTNL();
	if (ulp_id != BNXT_ROCE_ULP)
		return -EINVAL;

	if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
		return 0;

	edev->ulp_tbl[ulp_id].msix_requested = 0;
	edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
	if (netif_running(dev)) {
		bnxt_close_nic(bp, true, false);
		bnxt_open_nic(bp, true, false);
	}
	return 0;
}

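/* Helpers for the core driver to query how many MSI-X vectors and stat
 * contexts the RoCE ULP currently owns, and where its vector range
 * starts.
 */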
int bnxt_get_ulp_msix_num(struct bnxt *bp)
{
	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
		struct bnxt_en_dev *edev = bp->edev;

		return edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
	}
	return 0;
}

int bnxt_get_ulp_msix_base(struct bnxt *bp)
{
	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
		struct bnxt_en_dev *edev = bp->edev;

		if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
			return edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
	}
	return 0;
}

int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
{
	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
		return BNXT_MIN_ROCE_STAT_CTXS;

	return 0;
}

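/* Forward a fully formed HWRM request from the ULP to the firmware
 * under the HWRM lock, then copy back at most resp_max_len bytes of
 * the response.
 */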
static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
			 struct bnxt_fw_msg *fw_msg)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct input *req;
	int rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	req = fw_msg->msg;
	req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
	rc = _hwrm_send_message(bp, fw_msg->msg, fw_msg->msg_len,
				fw_msg->timeout);
	if (!rc) {
		struct output *resp = bp->hwrm_cmd_resp_addr;
		u32 len = le16_to_cpu(resp->resp_len);

		if (fw_msg->resp_max_len < len)
			len = fw_msg->resp_max_len;

		memcpy(fw_msg->resp, resp, len);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

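/* Reference counting used to keep a ULP pinned while a callback runs
 * outside the RCU read-side critical section (see bnxt_ulp_sriov_cfg()).
 */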
static void bnxt_ulp_get(struct bnxt_ulp *ulp)
{
	atomic_inc(&ulp->ref_count);
}

static void bnxt_ulp_put(struct bnxt_ulp *ulp)
{
	atomic_dec(&ulp->ref_count);
}

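/* Notify all registered ULPs that the device is stopping or starting,
 * e.g. around reset or reconfiguration.  Callers hold the RTNL lock,
 * hence rtnl_dereference() on the ops pointer.
 */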
void bnxt_ulp_stop(struct bnxt *bp)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_stop)
			continue;
		ops->ulp_stop(ulp->handle);
	}
}

void bnxt_ulp_start(struct bnxt *bp)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_start)
			continue;
		ops->ulp_start(ulp->handle);
	}
}

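/* Notify ULPs of a change in the number of VFs.  The callback may
 * sleep, so the ops pointer is sampled under rcu_read_lock() and the
 * ULP is held with a reference across the call.
 */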
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		rcu_read_lock();
		ops = rcu_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_sriov_config) {
			rcu_read_unlock();
			continue;
		}
		bnxt_ulp_get(ulp);
		rcu_read_unlock();
		ops->ulp_sriov_config(ulp->handle, num_vfs);
		bnxt_ulp_put(ulp);
	}
}

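/* Give each ULP a final callback before the device goes away. */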
void bnxt_ulp_shutdown(struct bnxt *bp)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_shutdown)
			continue;
		ops->ulp_shutdown(ulp->handle);
	}
}

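/* Tell the RoCE ULP to quiesce its IRQ usage before the core driver
 * reshuffles MSI-X vectors, and hand back the refreshed vector table
 * afterwards (ent == NULL on error, telling the ULP no vectors are
 * available).
 */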
void bnxt_ulp_irq_stop(struct bnxt *bp)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;

	if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
		return;

	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];

		if (!ulp->msix_requested)
			return;

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_irq_stop)
			return;
		ops->ulp_irq_stop(ulp->handle);
	}
}

void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;

	if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
		return;

	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
		struct bnxt_msix_entry *ent = NULL;

		if (!ulp->msix_requested)
			return;

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_irq_restart)
			return;

		if (!err) {
			ent = kcalloc(ulp->msix_requested, sizeof(*ent),
				      GFP_KERNEL);
			if (!ent)
				return;
			bnxt_fill_msix_vecs(bp, ent);
		}
		ops->ulp_irq_restart(ulp->handle, ent);
		kfree(ent);
	}
}

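/* Dispatch a firmware async event completion to every ULP that has
 * registered a notifier and has the event ID set in its bitmap.
 */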
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	rcu_read_lock();
	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		ops = rcu_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_async_notifier)
			continue;
		if (!ulp->async_events_bmap ||
		    event_id > ulp->max_async_event_id)
			continue;

		/* Read max_async_event_id first before testing the bitmap. */
		smp_rmb();
		if (test_bit(event_id, ulp->async_events_bmap))
			ops->ulp_async_notifier(ulp->handle, cmpl);
	}
	rcu_read_unlock();
}

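/* Record which async event IDs a ULP wants to receive.  The bitmap
 * pointer is published before max_async_event_id (paired with the
 * smp_rmb() in bnxt_ulp_async_events()), then the set is registered
 * with the firmware.
 */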
static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
				      unsigned long *events_bmap, u16 max_id)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;

	if (ulp_id >= BNXT_MAX_ULP)
		return -EINVAL;

	ulp = &edev->ulp_tbl[ulp_id];
	ulp->async_events_bmap = events_bmap;
	/* Make sure bnxt_ulp_async_events() sees this order */
	smp_wmb();
	ulp->max_async_event_id = max_id;
	bnxt_hwrm_func_rgtr_async_events(bp, events_bmap, max_id + 1);
	return 0;
}

static const struct bnxt_en_ops bnxt_en_ops_tbl = {
	.bnxt_register_device	= bnxt_register_dev,
	.bnxt_unregister_device	= bnxt_unregister_dev,
	.bnxt_request_msix	= bnxt_req_msix_vecs,
	.bnxt_free_msix		= bnxt_free_msix_vecs,
	.bnxt_send_fw_msg	= bnxt_send_msg,
	.bnxt_register_fw_async_events	= bnxt_register_async_events,
};

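/* Entry point for the RoCE driver: allocate (once) and return the
 * bnxt_en_dev that exposes the ops table above, tagging it with the
 * device's RoCE v1/v2 capabilities.
 */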
struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_en_dev *edev;

	edev = bp->edev;
	if (!edev) {
		edev = kzalloc(sizeof(*edev), GFP_KERNEL);
		if (!edev)
			return ERR_PTR(-ENOMEM);
		edev->en_ops = &bnxt_en_ops_tbl;
		if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
			edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
		if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
			edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
		edev->net = dev;
		edev->pdev = bp->pdev;
		bp->edev = edev;
	}
	return bp->edev;
}