// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 */
11 #include <linux/bitops.h>
12 #include <linux/netdevice.h>
13 #include <linux/skbuff.h>
14 #include <linux/etherdevice.h>
16 #include <linux/ethtool.h>
17 #include <linux/if_vlan.h>
18 #include <linux/if_ether.h>
20 #include <linux/prefetch.h>
#include <linux/module.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"
27 static DEFINE_MUTEX(bnad_fwimg_mutex);
32 static uint bnad_msix_disable;
33 module_param(bnad_msix_disable, uint, 0444);
34 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
36 static uint bnad_ioc_auto_recover = 1;
37 module_param(bnad_ioc_auto_recover, uint, 0444);
38 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
40 static uint bna_debugfs_enable = 1;
41 module_param(bna_debugfs_enable, uint, 0644);
42 MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
43 " Range[false:0|true:1]");
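/*
 * Illustrative usage note (not from the original sources): assuming the
 * module is built as "bna", the parameters above are set at load time, e.g.
 *
 *	modprobe bna bnad_msix_disable=1 bnad_ioc_auto_recover=0
 *
 * Only bna_debugfs_enable (mode 0644) can also be changed at runtime through
 * /sys/module/<module>/parameters/.
 */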
48 static u32 bnad_rxqs_per_cq = 2;
49 static atomic_t bna_id;
50 static const u8 bnad_bcast_addr[] __aligned(2) =
51 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
56 #define BNAD_GET_MBOX_IRQ(_bnad) \
57 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
58 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
59 ((_bnad)->pcidev->irq))
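/*
 * In MSI-X mode the mailbox gets its own vector (BNAD_MAILBOX_MSIX_INDEX in
 * msix_table); in INTx mode it shares the function's legacy PCI IRQ.  This
 * macro hides that distinction from callers such as bnad_mbox_irq_free().
 */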
#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len = (_size);		\
} while (0)
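/*
 * Used by bnad_setup_tx()/bnad_setup_rx() below to describe the unmap-queue
 * memory (plain kernel virtual memory, one block per queue), for example:
 *
 *	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
 *		bnad->num_txq_per_tx,
 *		bnad->txq_depth * sizeof(struct bnad_tx_unmap));
 */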
/* Reinitialize completions in CQ, once Rx is taken down */
static void
bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl;
	int i;

	for (i = 0; i < ccb->q_depth; i++) {
		cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
		cmpl->valid = 0;
	}
}
84 /* Tx Datapath functions */
87 /* Caller should ensure that the entry at unmap_q[index] is valid */
89 bnad_tx_buff_unmap(struct bnad *bnad,
90 struct bnad_tx_unmap *unmap_q,
91 u32 q_depth, u32 index)
93 struct bnad_tx_unmap *unmap;
97 unmap = &unmap_q[index];
103 dma_unmap_single(&bnad->pcidev->dev,
104 dma_unmap_addr(&unmap->vectors[0], dma_addr),
105 skb_headlen(skb), DMA_TO_DEVICE);
106 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
		if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
			vector = 0;
114 BNA_QE_INDX_INC(index, q_depth);
115 unmap = &unmap_q[index];
		dma_unmap_page(&bnad->pcidev->dev,
			dma_unmap_addr(&unmap->vectors[vector], dma_addr),
			dma_unmap_len(&unmap->vectors[vector], dma_len),
			DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
126 BNA_QE_INDX_INC(index, q_depth);
/*
 * Frees all pending Tx Bufs.
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
137 bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
139 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
143 for (i = 0; i < tcb->q_depth; i++) {
144 skb = unmap_q[i].skb;
147 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
149 dev_kfree_skb_any(skb);
/*
 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *		    b) Sending context
 */
159 bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
161 u32 sent_packets = 0, sent_bytes = 0;
162 u32 wis, unmap_wis, hw_cons, cons, q_depth;
163 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
164 struct bnad_tx_unmap *unmap;
167 /* Just return if TX is stopped */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;
171 hw_cons = *(tcb->hw_consumer_index);
173 cons = tcb->consumer_index;
174 q_depth = tcb->q_depth;
176 wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
177 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
180 unmap = &unmap_q[cons];
185 sent_bytes += skb->len;
187 unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
190 cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
191 dev_kfree_skb_any(skb);
194 /* Update consumer pointers. */
195 tcb->consumer_index = hw_cons;
197 tcb->txq->tx_packets += sent_packets;
198 tcb->txq->tx_bytes += sent_bytes;
204 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
206 struct net_device *netdev = bnad->netdev;
	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;
212 sent = bnad_txcmpl_process(bnad, tcb);
214 if (netif_queue_stopped(netdev) &&
215 netif_carrier_ok(netdev) &&
216 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
217 BNAD_NETIF_WAKE_THRESHOLD) {
218 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
219 netif_wake_queue(netdev);
220 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
225 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
226 bna_ib_ack(tcb->i_dbell, sent);
228 smp_mb__before_atomic();
229 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
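/*
 * Note: the BNAD_TXQ_FREE_SENT bit (test_and_set above, cleared here after a
 * barrier) is what keeps this completion path and the deferred
 * bnad_tx_cleanup() work from freeing the same Tx buffers concurrently; only
 * the path that wins the test_and_set walks the unmap queue.
 */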
234 /* MSIX Tx Completion Handler */
236 bnad_msix_tx(int irq, void *data)
238 struct bna_tcb *tcb = (struct bna_tcb *)data;
239 struct bnad *bnad = tcb->bnad;
241 bnad_tx_complete(bnad, tcb);
247 bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
249 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
251 unmap_q->reuse_pi = -1;
252 unmap_q->alloc_order = -1;
253 unmap_q->map_size = 0;
254 unmap_q->type = BNAD_RXBUF_NONE;
257 /* Default is page-based allocation. Multi-buffer support - TBD */
259 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
261 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
264 bnad_rxq_alloc_uninit(bnad, rcb);
266 order = get_order(rcb->rxq->buffer_size);
268 unmap_q->type = BNAD_RXBUF_PAGE;
	if (bna_is_small_rxq(rcb->id)) {
		unmap_q->alloc_order = 0;
		unmap_q->map_size = rcb->rxq->buffer_size;
	} else {
		if (rcb->rxq->multi_buffer) {
			unmap_q->alloc_order = 0;
			unmap_q->map_size = rcb->rxq->buffer_size;
			unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
		} else {
			unmap_q->alloc_order = order;
			unmap_q->map_size =
				(rcb->rxq->buffer_size > 2048) ?
				PAGE_SIZE << order : 2048;
		}
	}

	BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
}
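/*
 * With BNAD_RXBUF_PAGE the driver carves several map_size-byte receive
 * buffers out of one compound page of order alloc_order; reuse_pi (see
 * bnad_rxq_refill_page()) remembers the last partially used page so the next
 * refill keeps slicing it.  The BUG_ON() above relies on map_size dividing
 * PAGE_SIZE << order evenly.
 */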
292 bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
297 dma_unmap_page(&bnad->pcidev->dev,
298 dma_unmap_addr(&unmap->vector, dma_addr),
299 unmap->vector.len, DMA_FROM_DEVICE);
300 put_page(unmap->page);
302 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
303 unmap->vector.len = 0;
307 bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
312 dma_unmap_single(&bnad->pcidev->dev,
313 dma_unmap_addr(&unmap->vector, dma_addr),
314 unmap->vector.len, DMA_FROM_DEVICE);
315 dev_kfree_skb_any(unmap->skb);
317 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
318 unmap->vector.len = 0;
322 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
324 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
327 for (i = 0; i < rcb->q_depth; i++) {
328 struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
330 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
331 bnad_rxq_cleanup_skb(bnad, unmap);
333 bnad_rxq_cleanup_page(bnad, unmap);
335 bnad_rxq_alloc_uninit(bnad, rcb);
339 bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
341 u32 alloced, prod, q_depth;
342 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
343 struct bnad_rx_unmap *unmap, *prev;
344 struct bna_rxq_entry *rxent;
346 u32 page_offset, alloc_size;
349 prod = rcb->producer_index;
350 q_depth = rcb->q_depth;
352 alloc_size = PAGE_SIZE << unmap_q->alloc_order;
356 unmap = &unmap_q->unmap[prod];
		if (unmap_q->reuse_pi < 0) {
			page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
					unmap_q->alloc_order);
			page_offset = 0;
		} else {
			prev = &unmap_q->unmap[unmap_q->reuse_pi];
			page = prev->page;
			page_offset = prev->page_offset + unmap_q->map_size;
			get_page(page);
		}
369 if (unlikely(!page)) {
370 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
371 rcb->rxq->rxbuf_alloc_failed++;
375 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
376 unmap_q->map_size, DMA_FROM_DEVICE);
377 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
379 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
380 rcb->rxq->rxbuf_map_failed++;
385 unmap->page_offset = page_offset;
386 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
387 unmap->vector.len = unmap_q->map_size;
388 page_offset += unmap_q->map_size;
		if (page_offset < alloc_size)
			unmap_q->reuse_pi = prod;
		else
			unmap_q->reuse_pi = -1;
395 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
396 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
397 BNA_QE_INDX_INC(prod, q_depth);
402 if (likely(alloced)) {
403 rcb->producer_index = prod;
405 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
406 bna_rxq_prod_indx_doorbell(rcb);
413 bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
415 u32 alloced, prod, q_depth, buff_sz;
416 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
417 struct bnad_rx_unmap *unmap;
418 struct bna_rxq_entry *rxent;
422 buff_sz = rcb->rxq->buffer_size;
423 prod = rcb->producer_index;
424 q_depth = rcb->q_depth;
428 unmap = &unmap_q->unmap[prod];
430 skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
432 if (unlikely(!skb)) {
433 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
434 rcb->rxq->rxbuf_alloc_failed++;
438 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
439 buff_sz, DMA_FROM_DEVICE);
440 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
441 dev_kfree_skb_any(skb);
442 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
443 rcb->rxq->rxbuf_map_failed++;
448 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
449 unmap->vector.len = buff_sz;
451 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
452 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
453 BNA_QE_INDX_INC(prod, q_depth);
458 if (likely(alloced)) {
459 rcb->producer_index = prod;
461 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
462 bna_rxq_prod_indx_doorbell(rcb);
469 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
471 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
474 to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
	if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
		return;
478 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
479 bnad_rxq_refill_skb(bnad, rcb, to_alloc);
481 bnad_rxq_refill_page(bnad, rcb, to_alloc);
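/*
 * Refill is deliberately batched: bnad_rxq_post() returns early unless at
 * least 2^BNAD_RXQ_REFILL_THRESHOLD_SHIFT entries are free, so the producer
 * doorbell is rung once per batch rather than once per buffer.
 */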
#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
				BNA_CQ_EF_IPV6 | \
				BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
				BNA_CQ_EF_L4_CKSUM_OK)
489 #define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
490 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
491 #define flags_tcp6 (BNA_CQ_EF_IPV6 | \
492 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
493 #define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
494 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
495 #define flags_udp6 (BNA_CQ_EF_IPV6 | \
496 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
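/*
 * The masks above isolate the protocol and checksum-status bits of the
 * completion flags; bnad_cq_process() marks an skb CHECKSUM_UNNECESSARY only
 * when the masked value matches one of the four fully validated TCP/UDP over
 * IPv4/IPv6 combinations.
 */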
499 bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
500 u32 sop_ci, u32 nvecs)
502 struct bnad_rx_unmap_q *unmap_q;
503 struct bnad_rx_unmap *unmap;
506 unmap_q = rcb->unmap_q;
507 for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
508 unmap = &unmap_q->unmap[ci];
509 BNA_QE_INDX_INC(ci, rcb->q_depth);
511 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
512 bnad_rxq_cleanup_skb(bnad, unmap);
514 bnad_rxq_cleanup_page(bnad, unmap);
519 bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
523 struct bnad_rx_unmap_q *unmap_q;
524 struct bna_cq_entry *cq, *cmpl;
525 u32 ci, pi, totlen = 0;
	cq = ccb->sw_q;
	pi = ccb->producer_index;
	cmpl = &cq[pi];
531 rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
532 unmap_q = rcb->unmap_q;
534 ci = rcb->consumer_index;
536 /* prefetch header */
537 prefetch(page_address(unmap_q->unmap[ci].page) +
538 unmap_q->unmap[ci].page_offset);
541 struct bnad_rx_unmap *unmap;
544 unmap = &unmap_q->unmap[ci];
545 BNA_QE_INDX_INC(ci, rcb->q_depth);
547 dma_unmap_page(&bnad->pcidev->dev,
548 dma_unmap_addr(&unmap->vector, dma_addr),
549 unmap->vector.len, DMA_FROM_DEVICE);
551 len = ntohs(cmpl->length);
552 skb->truesize += unmap->vector.len;
555 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
556 unmap->page, unmap->page_offset, len);
559 unmap->vector.len = 0;
561 BNA_QE_INDX_INC(pi, ccb->q_depth);
566 skb->data_len += totlen;
570 bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
571 struct bnad_rx_unmap *unmap, u32 len)
575 dma_unmap_single(&bnad->pcidev->dev,
576 dma_unmap_addr(&unmap->vector, dma_addr),
577 unmap->vector.len, DMA_FROM_DEVICE);
580 skb->protocol = eth_type_trans(skb, bnad->netdev);
583 unmap->vector.len = 0;
587 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
589 struct bna_cq_entry *cq, *cmpl, *next_cmpl;
590 struct bna_rcb *rcb = NULL;
591 struct bnad_rx_unmap_q *unmap_q;
592 struct bnad_rx_unmap *unmap = NULL;
593 struct sk_buff *skb = NULL;
594 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
595 struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
596 u32 packets = 0, len = 0, totlen = 0;
597 u32 pi, vec, sop_ci = 0, nvecs = 0;
598 u32 flags, masked_flags;
	prefetch(bnad->netdev);

	cq = ccb->sw_q;
604 while (packets < budget) {
605 cmpl = &cq[ccb->producer_index];
		if (!cmpl->valid)
			break;

		/* The 'valid' field is set by the adapter, only after writing
609 * the other fields of completion entry. Hence, do not load
610 * other fields of completion entry *before* the 'valid' is
611 * loaded. Adding the rmb() here prevents the compiler and/or
612 * CPU from reordering the reads which would potentially result
		 * in reading stale values in completion entry.
		 */
		rmb();
617 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
		if (bna_is_small_rxq(cmpl->rxq_id))
			rcb = ccb->rcb[1];
		else
			rcb = ccb->rcb[0];
624 unmap_q = rcb->unmap_q;
626 /* start of packet ci */
627 sop_ci = rcb->consumer_index;
629 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
630 unmap = &unmap_q->unmap[sop_ci];
633 skb = napi_get_frags(&rx_ctrl->napi);
639 flags = ntohl(cmpl->flags);
640 len = ntohs(cmpl->length);
		/* Check all the completions for this frame.
		 * busy-wait doesn't help much, break here.
		 */
647 if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
648 (flags & BNA_CQ_EF_EOP) == 0) {
649 pi = ccb->producer_index;
651 BNA_QE_INDX_INC(pi, ccb->q_depth);
				if (!next_cmpl->valid)
					break;
656 /* The 'valid' field is set by the adapter, only
657 * after writing the other fields of completion
658 * entry. Hence, do not load other fields of
659 * completion entry *before* the 'valid' is
660 * loaded. Adding the rmb() here prevents the
661 * compiler and/or CPU from reordering the reads
662 * which would potentially result in reading
				 * stale values in completion entry.
				 */
				rmb();
667 len = ntohs(next_cmpl->length);
668 flags = ntohl(next_cmpl->flags);
672 } while ((flags & BNA_CQ_EF_EOP) == 0);
			if (!next_cmpl->valid)
				break;
679 /* TODO: BNA_CQ_EF_LOCAL ? */
680 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
681 BNA_CQ_EF_FCS_ERROR |
682 BNA_CQ_EF_TOO_LONG))) {
683 bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
684 rcb->rxq->rx_packets_with_error++;
689 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
690 bnad_cq_setup_skb(bnad, skb, unmap, len);
692 bnad_cq_setup_skb_frags(ccb, skb, nvecs);
694 rcb->rxq->rx_packets++;
695 rcb->rxq->rx_bytes += totlen;
696 ccb->bytes_per_intr += totlen;
698 masked_flags = flags & flags_cksum_prot_mask;
		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
702 ((masked_flags == flags_tcp4) ||
703 (masked_flags == flags_udp4) ||
704 (masked_flags == flags_tcp6) ||
705 (masked_flags == flags_udp6))))
706 skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
710 if ((flags & BNA_CQ_EF_VLAN) &&
711 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
712 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
714 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
715 netif_receive_skb(skb);
717 napi_gro_frags(&rx_ctrl->napi);
720 BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
721 for (vec = 0; vec < nvecs; vec++) {
			cmpl = &cq[ccb->producer_index];
			cmpl->valid = 0;
724 BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
728 napi_gro_flush(&rx_ctrl->napi, false);
729 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
730 bna_ib_ack_disable_irq(ccb->i_dbell, packets);
732 bnad_rxq_post(bnad, ccb->rcb[0]);
734 bnad_rxq_post(bnad, ccb->rcb[1]);
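/*
 * bnad_cq_process() returns the number of packets it consumed; the NAPI poll
 * handler (bnad_napi_poll_rx) compares that count against the budget to
 * decide when to call napi_complete_done() and re-arm the completion
 * interrupt.
 */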
740 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
742 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
743 struct napi_struct *napi = &rx_ctrl->napi;
745 if (likely(napi_schedule_prep(napi))) {
746 __napi_schedule(napi);
747 rx_ctrl->rx_schedule++;
751 /* MSIX Rx Path Handler */
753 bnad_msix_rx(int irq, void *data)
755 struct bna_ccb *ccb = (struct bna_ccb *)data;
758 ((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
759 bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
765 /* Interrupt handlers */
767 /* Mbox Interrupt Handlers */
769 bnad_msix_mbox_handler(int irq, void *data)
773 struct bnad *bnad = (struct bnad *)data;
775 spin_lock_irqsave(&bnad->bna_lock, flags);
776 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
777 spin_unlock_irqrestore(&bnad->bna_lock, flags);
781 bna_intr_status_get(&bnad->bna, intr_status);
783 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
784 bna_mbox_handler(&bnad->bna, intr_status);
786 spin_unlock_irqrestore(&bnad->bna_lock, flags);
792 bnad_isr(int irq, void *data)
797 struct bnad *bnad = (struct bnad *)data;
798 struct bnad_rx_info *rx_info;
799 struct bnad_rx_ctrl *rx_ctrl;
800 struct bna_tcb *tcb = NULL;
802 spin_lock_irqsave(&bnad->bna_lock, flags);
803 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
804 spin_unlock_irqrestore(&bnad->bna_lock, flags);
808 bna_intr_status_get(&bnad->bna, intr_status);
810 if (unlikely(!intr_status)) {
811 spin_unlock_irqrestore(&bnad->bna_lock, flags);
815 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
816 bna_mbox_handler(&bnad->bna, intr_status);
818 spin_unlock_irqrestore(&bnad->bna_lock, flags);
820 if (!BNA_IS_INTX_DATA_INTR(intr_status))
823 /* Process data interrupts */
825 for (i = 0; i < bnad->num_tx; i++) {
826 for (j = 0; j < bnad->num_txq_per_tx; j++) {
827 tcb = bnad->tx_info[i].tcb[j];
828 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
829 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
833 for (i = 0; i < bnad->num_rx; i++) {
834 rx_info = &bnad->rx_info[i];
837 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
838 rx_ctrl = &rx_info->rx_ctrl[j];
840 bnad_netif_rx_schedule_poll(bnad,
/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
852 bnad_enable_mbox_irq(struct bnad *bnad)
854 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
856 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
/*
 * Called with bnad->bna_lock held because of
 * bnad->cfg_flags access.
 */
864 bnad_disable_mbox_irq(struct bnad *bnad)
866 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
868 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
872 bnad_set_netdev_perm_addr(struct bnad *bnad)
874 struct net_device *netdev = bnad->netdev;
876 ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
877 if (is_zero_ether_addr(netdev->dev_addr))
878 ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
881 /* Control Path Handlers */
885 bnad_cb_mbox_intr_enable(struct bnad *bnad)
887 bnad_enable_mbox_irq(bnad);
891 bnad_cb_mbox_intr_disable(struct bnad *bnad)
893 bnad_disable_mbox_irq(bnad);
897 bnad_cb_ioceth_ready(struct bnad *bnad)
899 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
900 complete(&bnad->bnad_completions.ioc_comp);
904 bnad_cb_ioceth_failed(struct bnad *bnad)
906 bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
907 complete(&bnad->bnad_completions.ioc_comp);
911 bnad_cb_ioceth_disabled(struct bnad *bnad)
913 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
914 complete(&bnad->bnad_completions.ioc_comp);
918 bnad_cb_enet_disabled(void *arg)
920 struct bnad *bnad = (struct bnad *)arg;
922 netif_carrier_off(bnad->netdev);
923 complete(&bnad->bnad_completions.enet_comp);
927 bnad_cb_ethport_link_status(struct bnad *bnad,
928 enum bna_link_status link_status)
930 bool link_up = false;
932 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
934 if (link_status == BNA_CEE_UP) {
935 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
936 BNAD_UPDATE_CTR(bnad, cee_toggle);
937 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
939 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
940 BNAD_UPDATE_CTR(bnad, cee_toggle);
941 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
945 if (!netif_carrier_ok(bnad->netdev)) {
947 netdev_info(bnad->netdev, "link up\n");
948 netif_carrier_on(bnad->netdev);
949 BNAD_UPDATE_CTR(bnad, link_toggle);
950 for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
951 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
953 struct bna_tcb *tcb =
954 bnad->tx_info[tx_id].tcb[tcb_id];
961 if (test_bit(BNAD_TXQ_TX_STARTED,
965 * Transmit Schedule */
969 BNAD_UPDATE_CTR(bnad,
975 BNAD_UPDATE_CTR(bnad,
982 if (netif_carrier_ok(bnad->netdev)) {
983 netdev_info(bnad->netdev, "link down\n");
984 netif_carrier_off(bnad->netdev);
985 BNAD_UPDATE_CTR(bnad, link_toggle);
991 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
993 struct bnad *bnad = (struct bnad *)arg;
995 complete(&bnad->bnad_completions.tx_comp);
999 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
1001 struct bnad_tx_info *tx_info =
1002 (struct bnad_tx_info *)tcb->txq->tx->priv;
1005 tx_info->tcb[tcb->id] = tcb;
1009 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
1011 struct bnad_tx_info *tx_info =
1012 (struct bnad_tx_info *)tcb->txq->tx->priv;
1014 tx_info->tcb[tcb->id] = NULL;
1019 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
1021 struct bnad_rx_info *rx_info =
1022 (struct bnad_rx_info *)ccb->cq->rx->priv;
1024 rx_info->rx_ctrl[ccb->id].ccb = ccb;
1025 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
1029 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1031 struct bnad_rx_info *rx_info =
1032 (struct bnad_rx_info *)ccb->cq->rx->priv;
1034 rx_info->rx_ctrl[ccb->id].ccb = NULL;
1038 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1040 struct bnad_tx_info *tx_info =
1041 (struct bnad_tx_info *)tx->priv;
1042 struct bna_tcb *tcb;
1046 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1047 tcb = tx_info->tcb[i];
1051 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1052 netif_stop_subqueue(bnad->netdev, txq_id);
1057 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
1059 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1060 struct bna_tcb *tcb;
1064 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1065 tcb = tx_info->tcb[i];
1070 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
1071 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1072 BUG_ON(*(tcb->hw_consumer_index) != 0);
1074 if (netif_carrier_ok(bnad->netdev)) {
1075 netif_wake_subqueue(bnad->netdev, txq_id);
1076 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
	/*
	 * Workaround for the case where the first ioceth enable fails and we
	 * read back a zero MAC address: fetch the latest MAC address from the
	 * firmware again.
	 */
1085 if (is_zero_ether_addr(bnad->perm_addr)) {
1086 bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
1087 bnad_set_netdev_perm_addr(bnad);
/*
 * Free all TxQ buffers and then notify TX_E_CLEANUP_DONE to the Tx fsm.
 */
1095 bnad_tx_cleanup(struct delayed_work *work)
1097 struct bnad_tx_info *tx_info =
1098 container_of(work, struct bnad_tx_info, tx_cleanup_work);
1099 struct bnad *bnad = NULL;
1100 struct bna_tcb *tcb;
1101 unsigned long flags;
1104 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1105 tcb = tx_info->tcb[i];
1111 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1116 bnad_txq_cleanup(bnad, tcb);
1118 smp_mb__before_atomic();
1119 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1123 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1124 msecs_to_jiffies(1));
1128 spin_lock_irqsave(&bnad->bna_lock, flags);
1129 bna_tx_cleanup_complete(tx_info->tx);
1130 spin_unlock_irqrestore(&bnad->bna_lock, flags);
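/*
 * Tx cleanup runs from a delayed work item rather than from the callback
 * itself: if a TxQ is still being drained by bnad_tx_complete() (FREE_SENT
 * bit already set), the work re-queues itself after 1 ms and tries again
 * before telling bna that cleanup is complete.
 */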
1134 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1136 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1137 struct bna_tcb *tcb;
1140 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1141 tcb = tx_info->tcb[i];
1146 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1150 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1152 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1153 struct bna_ccb *ccb;
1154 struct bnad_rx_ctrl *rx_ctrl;
1157 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1158 rx_ctrl = &rx_info->rx_ctrl[i];
1163 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1166 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
/*
 * Free all RxQ buffers and then notify RX_E_CLEANUP_DONE to the Rx fsm.
 */
1174 bnad_rx_cleanup(void *work)
1176 struct bnad_rx_info *rx_info =
1177 container_of(work, struct bnad_rx_info, rx_cleanup_work);
1178 struct bnad_rx_ctrl *rx_ctrl;
1179 struct bnad *bnad = NULL;
1180 unsigned long flags;
1183 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1184 rx_ctrl = &rx_info->rx_ctrl[i];
1189 bnad = rx_ctrl->ccb->bnad;
		/*
		 * Wait till the poll handler has exited
		 * and nothing can be scheduled anymore
		 */
1195 napi_disable(&rx_ctrl->napi);
1197 bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1198 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1199 if (rx_ctrl->ccb->rcb[1])
1200 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1203 spin_lock_irqsave(&bnad->bna_lock, flags);
1204 bna_rx_cleanup_complete(rx_info->rx);
1205 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1209 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1211 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1212 struct bna_ccb *ccb;
1213 struct bnad_rx_ctrl *rx_ctrl;
1216 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1217 rx_ctrl = &rx_info->rx_ctrl[i];
1222 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1225 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1228 queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1232 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1234 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1235 struct bna_ccb *ccb;
1236 struct bna_rcb *rcb;
1237 struct bnad_rx_ctrl *rx_ctrl;
1240 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1241 rx_ctrl = &rx_info->rx_ctrl[i];
1246 napi_enable(&rx_ctrl->napi);
1248 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1253 bnad_rxq_alloc_init(bnad, rcb);
1254 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1255 set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1256 bnad_rxq_post(bnad, rcb);
1262 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1264 struct bnad *bnad = (struct bnad *)arg;
1266 complete(&bnad->bnad_completions.rx_comp);
1270 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1272 bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1273 complete(&bnad->bnad_completions.mcast_comp);
1277 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1278 struct bna_stats *stats)
1280 if (status == BNA_CB_SUCCESS)
1281 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1283 if (!netif_running(bnad->netdev) ||
1284 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1287 mod_timer(&bnad->stats_timer,
1288 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1292 bnad_cb_enet_mtu_set(struct bnad *bnad)
1294 bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1295 complete(&bnad->bnad_completions.mtu_comp);
1299 bnad_cb_completion(void *arg, enum bfa_status status)
1301 struct bnad_iocmd_comp *iocmd_comp =
1302 (struct bnad_iocmd_comp *)arg;
1304 iocmd_comp->comp_status = (u32) status;
1305 complete(&iocmd_comp->comp);
1308 /* Resource allocation, free functions */
1311 bnad_mem_free(struct bnad *bnad,
1312 struct bna_mem_info *mem_info)
1317 if (mem_info->mdl == NULL)
1320 for (i = 0; i < mem_info->num; i++) {
1321 if (mem_info->mdl[i].kva != NULL) {
1322 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1323 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1325 dma_free_coherent(&bnad->pcidev->dev,
1326 mem_info->mdl[i].len,
1327 mem_info->mdl[i].kva, dma_pa);
1329 kfree(mem_info->mdl[i].kva);
1332 kfree(mem_info->mdl);
1333 mem_info->mdl = NULL;
1337 bnad_mem_alloc(struct bnad *bnad,
1338 struct bna_mem_info *mem_info)
1343 if ((mem_info->num == 0) || (mem_info->len == 0)) {
1344 mem_info->mdl = NULL;
	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
				GFP_KERNEL);
1350 if (mem_info->mdl == NULL)
1353 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1354 for (i = 0; i < mem_info->num; i++) {
1355 mem_info->mdl[i].len = mem_info->len;
1356 mem_info->mdl[i].kva =
1357 dma_alloc_coherent(&bnad->pcidev->dev,
						   mem_info->len, &dma_pa,
						   GFP_KERNEL);
1360 if (mem_info->mdl[i].kva == NULL)
1363 BNA_SET_DMA_ADDR(dma_pa,
1364 &(mem_info->mdl[i].dma));
1367 for (i = 0; i < mem_info->num; i++) {
1368 mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
						       GFP_KERNEL);
1371 if (mem_info->mdl[i].kva == NULL)
1379 bnad_mem_free(bnad, mem_info);
1383 /* Free IRQ for Mailbox */
1385 bnad_mbox_irq_free(struct bnad *bnad)
1388 unsigned long flags;
1390 spin_lock_irqsave(&bnad->bna_lock, flags);
1391 bnad_disable_mbox_irq(bnad);
1392 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1394 irq = BNAD_GET_MBOX_IRQ(bnad);
1395 free_irq(irq, bnad);
/*
 * Allocates IRQ for Mailbox, but keeps it disabled.
 * This will be enabled once we get the mbox enable callback
 * from bna.
 */
1404 bnad_mbox_irq_alloc(struct bnad *bnad)
1407 unsigned long irq_flags, flags;
1409 irq_handler_t irq_handler;
1411 spin_lock_irqsave(&bnad->bna_lock, flags);
1412 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1413 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1414 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1417 irq_handler = (irq_handler_t)bnad_isr;
1418 irq = bnad->pcidev->irq;
1419 irq_flags = IRQF_SHARED;
1422 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1423 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs does not execute.
	 */
1429 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1431 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1433 err = request_irq(irq, irq_handler, irq_flags,
1434 bnad->mbox_irq_name, bnad);
1440 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1442 kfree(intr_info->idl);
1443 intr_info->idl = NULL;
1446 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1448 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1449 u32 txrx_id, struct bna_intr_info *intr_info)
1451 int i, vector_start = 0;
1453 unsigned long flags;
1455 spin_lock_irqsave(&bnad->bna_lock, flags);
1456 cfg_flags = bnad->cfg_flags;
1457 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1459 if (cfg_flags & BNAD_CF_MSIX) {
1460 intr_info->intr_type = BNA_INTR_T_MSIX;
1461 intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
1464 if (!intr_info->idl)
1469 vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1473 vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1474 (bnad->num_tx * bnad->num_txq_per_tx) +
1482 for (i = 0; i < intr_info->num; i++)
1483 intr_info->idl[i].vector = vector_start + i;
1485 intr_info->intr_type = BNA_INTR_T_INTX;
1487 intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
1490 if (!intr_info->idl)
1495 intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1499 intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
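/*
 * MSI-X vector layout: the first BNAD_MAILBOX_MSIX_VECTORS entries of
 * msix_table serve the mailbox, followed by one vector per TxQ and then one
 * per Rx path (CQ), which is why the Rx vector_start above skips
 * num_tx * num_txq_per_tx entries.
 */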
/* NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
1510 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1516 for (i = 0; i < num_txqs; i++) {
1517 if (tx_info->tcb[i] == NULL)
1520 vector_num = tx_info->tcb[i]->intr_vector;
1521 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
/* NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
1529 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1530 u32 tx_id, int num_txqs)
1536 for (i = 0; i < num_txqs; i++) {
1537 vector_num = tx_info->tcb[i]->intr_vector;
1538 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1539 tx_id + tx_info->tcb[i]->id);
1540 err = request_irq(bnad->msix_table[vector_num].vector,
1541 (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
1552 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
/* NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
1560 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1566 for (i = 0; i < num_rxps; i++) {
1567 if (rx_info->rx_ctrl[i].ccb == NULL)
1570 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1571 free_irq(bnad->msix_table[vector_num].vector,
1572 rx_info->rx_ctrl[i].ccb);
/* NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
1580 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1581 u32 rx_id, int num_rxps)
1587 for (i = 0; i < num_rxps; i++) {
1588 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1589 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1591 rx_id + rx_info->rx_ctrl[i].ccb->id);
1592 err = request_irq(bnad->msix_table[vector_num].vector,
1593 (irq_handler_t)bnad_msix_rx, 0,
1594 rx_info->rx_ctrl[i].ccb->name,
1595 rx_info->rx_ctrl[i].ccb);
1604 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1608 /* Free Tx object Resources */
1610 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1614 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1615 if (res_info[i].res_type == BNA_RES_T_MEM)
1616 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1617 else if (res_info[i].res_type == BNA_RES_T_INTR)
1618 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1622 /* Allocates memory and interrupt resources for Tx object */
1624 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1629 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1630 if (res_info[i].res_type == BNA_RES_T_MEM)
1631 err = bnad_mem_alloc(bnad,
1632 &res_info[i].res_u.mem_info);
1633 else if (res_info[i].res_type == BNA_RES_T_INTR)
1634 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1635 &res_info[i].res_u.intr_info);
1642 bnad_tx_res_free(bnad, res_info);
1646 /* Free Rx object Resources */
1648 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1652 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1653 if (res_info[i].res_type == BNA_RES_T_MEM)
1654 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1655 else if (res_info[i].res_type == BNA_RES_T_INTR)
1656 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1660 /* Allocates memory and interrupt resources for Rx object */
1662 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1667 /* All memory needs to be allocated before setup_ccbs */
1668 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1669 if (res_info[i].res_type == BNA_RES_T_MEM)
1670 err = bnad_mem_alloc(bnad,
1671 &res_info[i].res_u.mem_info);
1672 else if (res_info[i].res_type == BNA_RES_T_INTR)
1673 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1674 &res_info[i].res_u.intr_info);
1681 bnad_rx_res_free(bnad, res_info);
1685 /* Timer callbacks */
1688 bnad_ioc_timeout(struct timer_list *t)
1690 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
1691 unsigned long flags;
1693 spin_lock_irqsave(&bnad->bna_lock, flags);
1694 bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
1695 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1699 bnad_ioc_hb_check(struct timer_list *t)
1701 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
1702 unsigned long flags;
1704 spin_lock_irqsave(&bnad->bna_lock, flags);
1705 bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
1706 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1710 bnad_iocpf_timeout(struct timer_list *t)
1712 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
1713 unsigned long flags;
1715 spin_lock_irqsave(&bnad->bna_lock, flags);
1716 bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
1717 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1721 bnad_iocpf_sem_timeout(struct timer_list *t)
1723 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
1724 unsigned long flags;
1726 spin_lock_irqsave(&bnad->bna_lock, flags);
1727 bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
1728 spin_unlock_irqrestore(&bnad->bna_lock, flags);
/*
 * All timer routines use bnad->bna_lock to protect against the following
 * race, which may occur in case of no locking: one CPU re-arms a timer with
 * mod_timer() after another CPU has already cleared the *_TIMER_RUNNING flag
 * and called del_timer_sync(), leaving a stray timer behind.
 */
1741 /* b) Dynamic Interrupt Moderation Timer */
1743 bnad_dim_timeout(struct timer_list *t)
1745 struct bnad *bnad = from_timer(bnad, t, dim_timer);
1746 struct bnad_rx_info *rx_info;
1747 struct bnad_rx_ctrl *rx_ctrl;
1749 unsigned long flags;
1751 if (!netif_carrier_ok(bnad->netdev))
1754 spin_lock_irqsave(&bnad->bna_lock, flags);
1755 for (i = 0; i < bnad->num_rx; i++) {
1756 rx_info = &bnad->rx_info[i];
1759 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1760 rx_ctrl = &rx_info->rx_ctrl[j];
1763 bna_rx_dim_update(rx_ctrl->ccb);
	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1768 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1769 mod_timer(&bnad->dim_timer,
1770 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1771 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1774 /* c) Statistics Timer */
1776 bnad_stats_timeout(struct timer_list *t)
1778 struct bnad *bnad = from_timer(bnad, t, stats_timer);
1779 unsigned long flags;
1781 if (!netif_running(bnad->netdev) ||
1782 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1785 spin_lock_irqsave(&bnad->bna_lock, flags);
1786 bna_hw_stats_get(&bnad->bna);
1787 spin_unlock_irqrestore(&bnad->bna_lock, flags);
/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
1795 bnad_dim_timer_start(struct bnad *bnad)
1797 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1798 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1799 timer_setup(&bnad->dim_timer, bnad_dim_timeout, 0);
1800 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1801 mod_timer(&bnad->dim_timer,
1802 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
1811 bnad_stats_timer_start(struct bnad *bnad)
1813 unsigned long flags;
1815 spin_lock_irqsave(&bnad->bna_lock, flags);
1816 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1817 timer_setup(&bnad->stats_timer, bnad_stats_timeout, 0);
1818 mod_timer(&bnad->stats_timer,
1819 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1821 spin_unlock_irqrestore(&bnad->bna_lock, flags);
/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
1829 bnad_stats_timer_stop(struct bnad *bnad)
{
	int to_del = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		to_del = 1;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (to_del)
		del_timer_sync(&bnad->stats_timer);
}
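/*
 * del_timer_sync() is intentionally called after dropping bna_lock: the timer
 * callback (bnad_stats_timeout) takes the same lock, so waiting for it while
 * holding the lock could deadlock.
 */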
1845 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1847 int i = 1; /* Index 0 has broadcast address */
1848 struct netdev_hw_addr *mc_addr;
1850 netdev_for_each_mc_addr(mc_addr, netdev) {
1851 ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
1857 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1859 struct bnad_rx_ctrl *rx_ctrl =
1860 container_of(napi, struct bnad_rx_ctrl, napi);
1861 struct bnad *bnad = rx_ctrl->bnad;
1864 rx_ctrl->rx_poll_ctr++;
1866 if (!netif_carrier_ok(bnad->netdev))
1869 rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1874 napi_complete_done(napi, rcvd);
1876 rx_ctrl->rx_complete++;
1879 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1884 #define BNAD_NAPI_POLL_QUOTA 64
1886 bnad_napi_add(struct bnad *bnad, u32 rx_id)
1888 struct bnad_rx_ctrl *rx_ctrl;
1891 /* Initialize & enable NAPI */
1892 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1893 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1894 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1895 bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1900 bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1904 /* First disable and then clean up */
1905 for (i = 0; i < bnad->num_rxp_per_rx; i++)
1906 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
/* Should be called with conf_lock held */
1911 bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1913 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1914 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1915 unsigned long flags;
1920 init_completion(&bnad->bnad_completions.tx_comp);
1921 spin_lock_irqsave(&bnad->bna_lock, flags);
1922 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1923 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1924 wait_for_completion(&bnad->bnad_completions.tx_comp);
1926 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1927 bnad_tx_msix_unregister(bnad, tx_info,
1928 bnad->num_txq_per_tx);
1930 spin_lock_irqsave(&bnad->bna_lock, flags);
1931 bna_tx_destroy(tx_info->tx);
1932 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1937 bnad_tx_res_free(bnad, res_info);
/* Should be called with conf_lock held */
1942 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1945 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1946 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1947 struct bna_intr_info *intr_info =
1948 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1949 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1950 static const struct bna_tx_event_cbfn tx_cbfn = {
1951 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1952 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1953 .tx_stall_cbfn = bnad_cb_tx_stall,
1954 .tx_resume_cbfn = bnad_cb_tx_resume,
1955 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1959 unsigned long flags;
1961 tx_info->tx_id = tx_id;
1963 /* Initialize the Tx object configuration */
1964 tx_config->num_txq = bnad->num_txq_per_tx;
1965 tx_config->txq_depth = bnad->txq_depth;
1966 tx_config->tx_type = BNA_TX_T_REGULAR;
1967 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1969 /* Get BNA's resource requirement for one tx object */
1970 spin_lock_irqsave(&bnad->bna_lock, flags);
1971 bna_tx_res_req(bnad->num_txq_per_tx,
1972 bnad->txq_depth, res_info);
1973 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1975 /* Fill Unmap Q memory requirements */
1976 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1977 bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1980 /* Allocate resources */
1981 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1985 /* Ask BNA to create one Tx object, supplying required resources */
1986 spin_lock_irqsave(&bnad->bna_lock, flags);
1987 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1989 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1996 INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
1997 (work_func_t)bnad_tx_cleanup);
1999 /* Register ISR for the Tx object */
2000 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2001 err = bnad_tx_msix_register(bnad, tx_info,
2002 tx_id, bnad->num_txq_per_tx);
2007 spin_lock_irqsave(&bnad->bna_lock, flags);
2009 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2014 spin_lock_irqsave(&bnad->bna_lock, flags);
2015 bna_tx_destroy(tx_info->tx);
2016 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2020 bnad_tx_res_free(bnad, res_info);
2024 /* Setup the rx config for bna_rx_create */
2025 /* bnad decides the configuration */
2027 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2029 memset(rx_config, 0, sizeof(*rx_config));
2030 rx_config->rx_type = BNA_RX_T_REGULAR;
2031 rx_config->num_paths = bnad->num_rxp_per_rx;
2032 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2034 if (bnad->num_rxp_per_rx > 1) {
2035 rx_config->rss_status = BNA_STATUS_T_ENABLED;
2036 rx_config->rss_config.hash_type =
2037 (BFI_ENET_RSS_IPV6 |
2038 BFI_ENET_RSS_IPV6_TCP |
2040 BFI_ENET_RSS_IPV4_TCP);
2041 rx_config->rss_config.hash_mask =
2042 bnad->num_rxp_per_rx - 1;
2043 netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
2044 sizeof(rx_config->rss_config.toeplitz_hash_key));
2046 rx_config->rss_status = BNA_STATUS_T_DISABLED;
2047 memset(&rx_config->rss_config, 0,
2048 sizeof(rx_config->rss_config));
2051 rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2052 rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
	/* BNA_RXP_SINGLE - one data-buffer queue
	 * BNA_RXP_SLR - one small-buffer and one large-buffer queues
	 * BNA_RXP_HDS - one header-buffer and one data-buffer queues
	 */
	/* TODO: configurable param for queue type */
2059 rx_config->rxp_type = BNA_RXP_SLR;
2061 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2062 rx_config->frame_size > 4096) {
		/* though size_routing_enable is set in SLR,
		 * small packets may get routed to same rxq.
		 * set buf_size to 2048 instead of PAGE_SIZE.
		 */
2067 rx_config->q0_buf_size = 2048;
2068 /* this should be in multiples of 2 */
2069 rx_config->q0_num_vecs = 4;
2070 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2071 rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2073 rx_config->q0_buf_size = rx_config->frame_size;
2074 rx_config->q0_num_vecs = 1;
2075 rx_config->q0_depth = bnad->rxq_depth;
2078 /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2079 if (rx_config->rxp_type == BNA_RXP_SLR) {
2080 rx_config->q1_depth = bnad->rxq_depth;
2081 rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2084 rx_config->vlan_strip_status =
2085 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
2086 BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
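/*
 * Note on the RSS setup above: hash_mask is num_rxp_per_rx - 1, which assumes
 * the number of Rx paths is a power of two so the Toeplitz hash spreads flows
 * evenly across paths; a random hash key is generated per Rx object.
 */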
2090 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2092 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2095 for (i = 0; i < bnad->num_rxp_per_rx; i++)
2096 rx_info->rx_ctrl[i].bnad = bnad;
2099 /* Called with mutex_lock(&bnad->conf_mutex) held */
2101 bnad_reinit_rx(struct bnad *bnad)
2103 struct net_device *netdev = bnad->netdev;
2104 u32 err = 0, current_err = 0;
2105 u32 rx_id = 0, count = 0;
2106 unsigned long flags;
2108 /* destroy and create new rx objects */
2109 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2110 if (!bnad->rx_info[rx_id].rx)
2112 bnad_destroy_rx(bnad, rx_id);
2115 spin_lock_irqsave(&bnad->bna_lock, flags);
2116 bna_enet_mtu_set(&bnad->bna.enet,
2117 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2118 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2120 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2122 current_err = bnad_setup_rx(bnad, rx_id);
2123 if (current_err && !err) {
2125 netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
2129 /* restore rx configuration */
2130 if (bnad->rx_info[0].rx && !err) {
2131 bnad_restore_vlans(bnad, 0);
2132 bnad_enable_default_bcast(bnad);
2133 spin_lock_irqsave(&bnad->bna_lock, flags);
2134 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2135 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2136 bnad_set_rx_mode(netdev);
2142 /* Called with bnad_conf_lock() held */
2144 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
2146 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2147 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2148 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2149 unsigned long flags;
2156 spin_lock_irqsave(&bnad->bna_lock, flags);
2157 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2158 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
2159 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
2162 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2164 del_timer_sync(&bnad->dim_timer);
2167 init_completion(&bnad->bnad_completions.rx_comp);
2168 spin_lock_irqsave(&bnad->bna_lock, flags);
2169 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2170 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2171 wait_for_completion(&bnad->bnad_completions.rx_comp);
2173 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2174 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2176 bnad_napi_delete(bnad, rx_id);
2178 spin_lock_irqsave(&bnad->bna_lock, flags);
2179 bna_rx_destroy(rx_info->rx);
2183 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2185 bnad_rx_res_free(bnad, res_info);
2188 /* Called with mutex_lock(&bnad->conf_mutex) held */
2190 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2193 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2194 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2195 struct bna_intr_info *intr_info =
2196 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2197 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2198 static const struct bna_rx_event_cbfn rx_cbfn = {
2199 .rcb_setup_cbfn = NULL,
2200 .rcb_destroy_cbfn = NULL,
2201 .ccb_setup_cbfn = bnad_cb_ccb_setup,
2202 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
2203 .rx_stall_cbfn = bnad_cb_rx_stall,
2204 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2205 .rx_post_cbfn = bnad_cb_rx_post,
2208 unsigned long flags;
2210 rx_info->rx_id = rx_id;
2212 /* Initialize the Rx object configuration */
2213 bnad_init_rx_config(bnad, rx_config);
2215 /* Get BNA's resource requirement for one Rx object */
2216 spin_lock_irqsave(&bnad->bna_lock, flags);
2217 bna_rx_res_req(rx_config, res_info);
2218 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2220 /* Fill Unmap Q memory requirements */
2221 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2222 rx_config->num_paths,
2223 (rx_config->q0_depth *
2224 sizeof(struct bnad_rx_unmap)) +
2225 sizeof(struct bnad_rx_unmap_q));
2227 if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2228 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2229 rx_config->num_paths,
2230 (rx_config->q1_depth *
2231 sizeof(struct bnad_rx_unmap) +
2232 sizeof(struct bnad_rx_unmap_q)));
2234 /* Allocate resource */
2235 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2239 bnad_rx_ctrl_init(bnad, rx_id);
2241 /* Ask BNA to create one Rx object, supplying required resources */
2242 spin_lock_irqsave(&bnad->bna_lock, flags);
2243 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2247 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2251 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2253 INIT_WORK(&rx_info->rx_cleanup_work,
2254 (work_func_t)(bnad_rx_cleanup));
	/*
	 * Init NAPI, so that state is set to NAPI_STATE_SCHED and the
	 * IRQ handler cannot schedule NAPI at this point.
	 */
2260 bnad_napi_add(bnad, rx_id);
2262 /* Register ISR for the Rx object */
2263 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2264 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2265 rx_config->num_paths);
2270 spin_lock_irqsave(&bnad->bna_lock, flags);
2272 /* Set up Dynamic Interrupt Moderation Vector */
2273 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2274 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2276 /* Enable VLAN filtering only on the default Rx */
2277 bna_rx_vlanfilter_enable(rx);
2279 /* Start the DIM timer */
2280 bnad_dim_timer_start(bnad);
2284 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2289 bnad_destroy_rx(bnad, rx_id);
2293 /* Called with conf_lock & bnad->bna_lock held */
2295 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2297 struct bnad_tx_info *tx_info;
2299 tx_info = &bnad->tx_info[0];
2303 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2306 /* Called with conf_lock & bnad->bna_lock held */
2308 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2310 struct bnad_rx_info *rx_info;
2313 for (i = 0; i < bnad->num_rx; i++) {
2314 rx_info = &bnad->rx_info[i];
2317 bna_rx_coalescing_timeo_set(rx_info->rx,
2318 bnad->rx_coalescing_timeo);
/*
 * Called with bnad->bna_lock held
 */
2326 bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
2330 if (!is_valid_ether_addr(mac_addr))
2331 return -EADDRNOTAVAIL;
2333 /* If datapath is down, pretend everything went through */
	if (!bnad->rx_info[0].rx)
		return 0;
2337 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
2338 if (ret != BNA_CB_SUCCESS)
2339 return -EADDRNOTAVAIL;
2344 /* Should be called with conf_lock held */
2346 bnad_enable_default_bcast(struct bnad *bnad)
2348 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2350 unsigned long flags;
2352 init_completion(&bnad->bnad_completions.mcast_comp);
2354 spin_lock_irqsave(&bnad->bna_lock, flags);
2355 ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
2356 bnad_cb_rx_mcast_add);
2357 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2359 if (ret == BNA_CB_SUCCESS)
2360 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2364 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2370 /* Called with mutex_lock(&bnad->conf_mutex) held */
2372 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2375 unsigned long flags;
2377 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2378 spin_lock_irqsave(&bnad->bna_lock, flags);
2379 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2380 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2384 /* Statistics utilities */
2386 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2390 for (i = 0; i < bnad->num_rx; i++) {
2391 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2392 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2393 stats->rx_packets += bnad->rx_info[i].
2394 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2395 stats->rx_bytes += bnad->rx_info[i].
2396 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2397 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2398 bnad->rx_info[i].rx_ctrl[j].ccb->
2400 stats->rx_packets +=
2401 bnad->rx_info[i].rx_ctrl[j].
2402 ccb->rcb[1]->rxq->rx_packets;
2404 bnad->rx_info[i].rx_ctrl[j].
2405 ccb->rcb[1]->rxq->rx_bytes;
2410 for (i = 0; i < bnad->num_tx; i++) {
2411 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2412 if (bnad->tx_info[i].tcb[j]) {
2413 stats->tx_packets +=
2414 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2416 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
/*
 * Must be called with the bna_lock held.
 */
2426 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2428 struct bfi_enet_stats_mac *mac_stats;
2432 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2434 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2435 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2436 mac_stats->rx_undersize;
2437 stats->tx_errors = mac_stats->tx_fcs_error +
2438 mac_stats->tx_undersize;
2439 stats->rx_dropped = mac_stats->rx_drop;
2440 stats->tx_dropped = mac_stats->tx_drop;
2441 stats->multicast = mac_stats->rx_multicast;
2442 stats->collisions = mac_stats->tx_total_collision;
2444 stats->rx_length_errors = mac_stats->rx_frame_length_error;
2446 /* receive ring buffer overflow ?? */
2448 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2449 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2450 /* recv'r fifo overrun */
2451 bmap = bna_rx_rid_mask(&bnad->bna);
2452 for (i = 0; bmap; i++) {
2454 stats->rx_fifo_errors +=
2455 bnad->stats.bna_stats->
2456 hw_stats.rxf_stats[i].frame_drops;
2464 bnad_mbox_irq_sync(struct bnad *bnad)
2467 unsigned long flags;
2469 spin_lock_irqsave(&bnad->bna_lock, flags);
2470 if (bnad->cfg_flags & BNAD_CF_MSIX)
2471 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2473 irq = bnad->pcidev->irq;
2474 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2476 synchronize_irq(irq);
2479 /* Utility used by bnad_start_xmit, for doing TSO */
2481 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2485 err = skb_cow_head(skb, 0);
2487 BNAD_UPDATE_CTR(bnad, tso_err);
	/*
	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
	 * excluding the length field.
	 */
2495 if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
2496 struct iphdr *iph = ip_hdr(skb);
		/* Do we really need these? */
		iph->tot_len = 0;
		iph->check = 0;
2502 tcp_hdr(skb)->check =
2503 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2505 BNAD_UPDATE_CTR(bnad, tso4);
2507 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2509 ipv6h->payload_len = 0;
2510 tcp_hdr(skb)->check =
2511 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2513 BNAD_UPDATE_CTR(bnad, tso6);
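/*
 * For reference: csum_tcpudp_magic()/csum_ipv6_magic() with a zero length
 * produce the pseudo-header sum over the addresses and protocol only; storing
 * its complement in tcp_hdr(skb)->check lets the hardware add the per-segment
 * length and payload when it computes the final checksum of each TSO segment.
 */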
/*
 * Initialize Q numbers depending on Rx Paths
 * Called with bnad->bna_lock held, because of cfg_flags access
 */
2525 bnad_q_num_init(struct bnad *bnad)
2529 rxps = min((uint)num_online_cpus(),
2530 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2532 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2533 rxps = 1; /* INTx */
2537 bnad->num_rxp_per_rx = rxps;
2538 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
/*
 * Adjusts the Q numbers, given a number of msix vectors.
 * Give preference to RSS as opposed to Tx priority Queues;
 * in such a case, just use 1 Tx Q.
 * Called with bnad->bna_lock held because of cfg_flags access.
 */
2548 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2550 bnad->num_txq_per_tx = 1;
2551 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2552 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2553 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2554 bnad->num_rxp_per_rx = msix_vectors -
2555 (bnad->num_tx * bnad->num_txq_per_tx) -
2556 BNAD_MAILBOX_MSIX_VECTORS;
2558 bnad->num_rxp_per_rx = 1;
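/*
 * Illustrative accounting (example values, not from the sources): with one
 * mailbox vector, 1 TxQ and 8 Rx paths the driver asks for 1 + 1 + 8 = 10
 * MSI-X vectors; if fewer are granted, bnad_enable_msix() calls
 * bnad_q_num_adjust() to shrink num_rxp_per_rx until the request fits.
 */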
2561 /* Enable / disable ioceth */
2563 bnad_ioceth_disable(struct bnad *bnad)
2565 unsigned long flags;
2568 spin_lock_irqsave(&bnad->bna_lock, flags);
2569 init_completion(&bnad->bnad_completions.ioc_comp);
2570 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2571 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2573 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2574 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2576 err = bnad->bnad_completions.ioc_comp_status;
2581 bnad_ioceth_enable(struct bnad *bnad)
2584 unsigned long flags;
2586 spin_lock_irqsave(&bnad->bna_lock, flags);
2587 init_completion(&bnad->bnad_completions.ioc_comp);
2588 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2589 bna_ioceth_enable(&bnad->bna.ioceth);
2590 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2592 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2593 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2595 err = bnad->bnad_completions.ioc_comp_status;
2600 /* Free BNA resources */
2602 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2607 for (i = 0; i < res_val_max; i++)
2608 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2611 /* Allocates memory and interrupt resources for BNA */
2613 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2618 for (i = 0; i < res_val_max; i++) {
2619 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2626 bnad_res_free(bnad, res_info, res_val_max);
2630 /* Interrupt enable / disable */
2632 bnad_enable_msix(struct bnad *bnad)
2635 unsigned long flags;
2637 spin_lock_irqsave(&bnad->bna_lock, flags);
2638 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2639 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2642 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2644 if (bnad->msix_table)
2648 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2650 if (!bnad->msix_table)
2653 for (i = 0; i < bnad->msix_num; i++)
2654 bnad->msix_table[i].entry = i;
2656 ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2660 } else if (ret < bnad->msix_num) {
2661 dev_warn(&bnad->pcidev->dev,
2662 "%d MSI-X vectors allocated < %d requested\n",
2663 ret, bnad->msix_num);
2665 spin_lock_irqsave(&bnad->bna_lock, flags);
2666 /* ret = number of MSI-X vectors we actually got */
2667 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2668 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2669 spin_unlock_irqrestore(&bnad->bna_lock, flags);
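		/* Recompute the vector requirement for the reduced queue set;
		 * if it still exceeds what was granted, give up on MSI-X
		 */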
2671 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2672 BNAD_MAILBOX_MSIX_VECTORS;
2674 if (bnad->msix_num > ret) {
2675 pci_disable_msix(bnad->pcidev);
2680 pci_intx(bnad->pcidev, 0);
2685 dev_warn(&bnad->pcidev->dev,
2686 "MSI-X enable failed - operating in INTx mode\n");
2688 kfree(bnad->msix_table);
2689 bnad->msix_table = NULL;
2691 spin_lock_irqsave(&bnad->bna_lock, flags);
2692 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2693 bnad_q_num_init(bnad);
2694 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2698 bnad_disable_msix(struct bnad *bnad)
2701 unsigned long flags;
2703 spin_lock_irqsave(&bnad->bna_lock, flags);
2704 cfg_flags = bnad->cfg_flags;
2705 if (bnad->cfg_flags & BNAD_CF_MSIX)
2706 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2707 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2709 if (cfg_flags & BNAD_CF_MSIX) {
2710 pci_disable_msix(bnad->pcidev);
2711 kfree(bnad->msix_table);
2712 bnad->msix_table = NULL;
2716 /* Netdev entry points */
2718 bnad_open(struct net_device *netdev)
2721 struct bnad *bnad = netdev_priv(netdev);
2722 struct bna_pause_config pause_config;
2723 unsigned long flags;
2725 mutex_lock(&bnad->conf_mutex);
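	/* Under conf_mutex: create Tx and Rx objects, program MTU and pause
	 * settings, enable the enet block, restore filters and start the
	 * stats timer
	 */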
2728 err = bnad_setup_tx(bnad, 0);
2733 err = bnad_setup_rx(bnad, 0);
2738 pause_config.tx_pause = 0;
2739 pause_config.rx_pause = 0;
2741 spin_lock_irqsave(&bnad->bna_lock, flags);
2742 bna_enet_mtu_set(&bnad->bna.enet,
2743 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2744 bna_enet_pause_config(&bnad->bna.enet, &pause_config);
2745 bna_enet_enable(&bnad->bna.enet);
2746 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2748 /* Enable broadcast */
2749 bnad_enable_default_bcast(bnad);
2751 /* Restore VLANs, if any */
2752 bnad_restore_vlans(bnad, 0);
2754 /* Set the UCAST address */
2755 spin_lock_irqsave(&bnad->bna_lock, flags);
2756 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2757 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2759 /* Start the stats timer */
2760 bnad_stats_timer_start(bnad);
2762 mutex_unlock(&bnad->conf_mutex);
2767 bnad_destroy_tx(bnad, 0);
2770 mutex_unlock(&bnad->conf_mutex);
2775 bnad_stop(struct net_device *netdev)
2777 struct bnad *bnad = netdev_priv(netdev);
2778 unsigned long flags;
2780 mutex_lock(&bnad->conf_mutex);
2782 /* Stop the stats timer */
2783 bnad_stats_timer_stop(bnad);
2785 init_completion(&bnad->bnad_completions.enet_comp);
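	/* Disable the enet block and wait for its callback before tearing down Tx and Rx */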
2787 spin_lock_irqsave(&bnad->bna_lock, flags);
2788 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2789 bnad_cb_enet_disabled);
2790 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2792 wait_for_completion(&bnad->bnad_completions.enet_comp);
2794 bnad_destroy_tx(bnad, 0);
2795 bnad_destroy_rx(bnad, 0);
2797 /* Synchronize mailbox IRQ */
2798 bnad_mbox_irq_sync(bnad);
2800 mutex_unlock(&bnad->conf_mutex);
2806 /* Returns 0 for success */
2808 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2809 struct sk_buff *skb, struct bna_txq_entry *txqent)
2815 if (skb_vlan_tag_present(skb)) {
2816 vlan_tag = (u16)skb_vlan_tag_get(skb);
2817 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2819 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2820 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2821 | (vlan_tag & 0x1fff);
2822 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2824 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2826 if (skb_is_gso(skb)) {
2827 gso_size = skb_shinfo(skb)->gso_size;
2828 if (unlikely(gso_size > bnad->netdev->mtu)) {
2829 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2832 if (unlikely((gso_size + skb_transport_offset(skb) +
2833 tcp_hdrlen(skb)) >= skb->len)) {
2834 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2835 txqent->hdr.wi.lso_mss = 0;
2836 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2838 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
2839 txqent->hdr.wi.lso_mss = htons(gso_size);
2842 if (bnad_tso_prepare(bnad, skb)) {
2843 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2847 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2848 txqent->hdr.wi.l4_hdr_size_n_offset =
2849 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2850 tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2852 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2853 txqent->hdr.wi.lso_mss = 0;
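		/* Non-TSO frame: oversized skbs are rejected; TCP/UDP checksum
		 * offload is set up below based on the L4 protocol
		 */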
2855 if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
2856 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2860 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2861 __be16 net_proto = vlan_get_protocol(skb);
2864 if (net_proto == htons(ETH_P_IP))
2865 proto = ip_hdr(skb)->protocol;
2866 #ifdef NETIF_F_IPV6_CSUM
2867 else if (net_proto == htons(ETH_P_IPV6)) {
2868 /* nexthdr may not be TCP immediately. */
2869 proto = ipv6_hdr(skb)->nexthdr;
2872 if (proto == IPPROTO_TCP) {
2873 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2874 txqent->hdr.wi.l4_hdr_size_n_offset =
2875 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2876 (0, skb_transport_offset(skb)));
2878 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2880 if (unlikely(skb_headlen(skb) <
2881 skb_transport_offset(skb) +
2883 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2886 } else if (proto == IPPROTO_UDP) {
2887 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2888 txqent->hdr.wi.l4_hdr_size_n_offset =
2889 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2890 (0, skb_transport_offset(skb)));
2892 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2893 if (unlikely(skb_headlen(skb) <
2894 skb_transport_offset(skb) +
2895 sizeof(struct udphdr))) {
2896 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2901 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2905 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2908 txqent->hdr.wi.flags = htons(flags);
2909 txqent->hdr.wi.frame_length = htonl(skb->len);
2915 * bnad_start_xmit : Netdev entry point for Transmit
2916 * Called under lock held by net_device
2919 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2921 struct bnad *bnad = netdev_priv(netdev);
2923 struct bna_tcb *tcb = NULL;
2924 struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2925 u32 prod, q_depth, vect_id;
2926 u32 wis, vectors, len;
2928 dma_addr_t dma_addr;
2929 struct bna_txq_entry *txqent;
2931 len = skb_headlen(skb);
2933 /* Sanity checks for the skb */
2935 if (unlikely(skb->len <= ETH_HLEN)) {
2936 dev_kfree_skb_any(skb);
2937 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2938 return NETDEV_TX_OK;
2940 if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2941 dev_kfree_skb_any(skb);
2942 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2943 return NETDEV_TX_OK;
2945 if (unlikely(len == 0)) {
2946 dev_kfree_skb_any(skb);
2947 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2948 return NETDEV_TX_OK;
2951 tcb = bnad->tx_info[0].tcb[txq_id];
2954 * Takes care of the Tx that is scheduled between clearing the flag
2955 * and the netif_tx_stop_all_queues() call.
2957 if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2958 dev_kfree_skb_any(skb);
2959 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2960 return NETDEV_TX_OK;
2963 q_depth = tcb->q_depth;
2964 prod = tcb->producer_index;
2965 unmap_q = tcb->unmap_q;
2967 vectors = 1 + skb_shinfo(skb)->nr_frags;
2968 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2970 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2971 dev_kfree_skb_any(skb);
2972 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2973 return NETDEV_TX_OK;
2976 /* Check for available TxQ resources */
2977 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2978 if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2979 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
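			/* Ring looks full: reclaim completed Tx buffers inline and ack the IB */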
2981 sent = bnad_txcmpl_process(bnad, tcb);
2982 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2983 bna_ib_ack(tcb->i_dbell, sent);
2984 smp_mb__before_atomic();
2985 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2987 netif_stop_queue(netdev);
2988 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2993 * Check again to deal with race condition between
2994 * netif_stop_queue here, and netif_wake_queue in
2995 * interrupt handler which is not inside netif tx lock.
2997 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2998 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2999 return NETDEV_TX_BUSY;
3001 netif_wake_queue(netdev);
3002 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
3006 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3007 head_unmap = &unmap_q[prod];
3009 /* Program the opcode, flags, frame_len, num_vectors in WI */
3010 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3011 dev_kfree_skb_any(skb);
3012 return NETDEV_TX_OK;
3014 txqent->hdr.wi.reserved = 0;
3015 txqent->hdr.wi.num_vectors = vectors;
3017 head_unmap->skb = skb;
3018 head_unmap->nvecs = 0;
3020 /* Program the vectors */
3022 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3023 len, DMA_TO_DEVICE);
3024 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3025 dev_kfree_skb_any(skb);
3026 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3027 return NETDEV_TX_OK;
3029 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3030 txqent->vector[0].length = htons(len);
3031 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3032 head_unmap->nvecs++;
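	/* Map each page fragment; a fresh extension work item is started
	 * every BFI_TX_MAX_VECTORS_PER_WI vectors
	 */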
3034 for (i = 0, vect_id = 0; i < vectors - 1; i++) {
3035 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3036 u32 size = skb_frag_size(frag);
3038 if (unlikely(size == 0)) {
3039 /* Undo the changes starting at tcb->producer_index */
3040 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3041 tcb->producer_index);
3042 dev_kfree_skb_any(skb);
3043 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3044 return NETDEV_TX_OK;
3050 if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
3052 BNA_QE_INDX_INC(prod, q_depth);
3053 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3054 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
3055 unmap = &unmap_q[prod];
3058 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3059 0, size, DMA_TO_DEVICE);
3060 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3061 /* Undo the changes starting at tcb->producer_index */
3062 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3063 tcb->producer_index);
3064 dev_kfree_skb_any(skb);
3065 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3066 return NETDEV_TX_OK;
3069 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3070 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3071 txqent->vector[vect_id].length = htons(size);
3072 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3074 head_unmap->nvecs++;
3077 if (unlikely(len != skb->len)) {
3078 /* Undo the changes starting at tcb->producer_index */
3079 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3080 dev_kfree_skb_any(skb);
3081 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3082 return NETDEV_TX_OK;
3085 BNA_QE_INDX_INC(prod, q_depth);
3086 tcb->producer_index = prod;
3090 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3091 return NETDEV_TX_OK;
3093 skb_tx_timestamp(skb);
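	/* Hand the new descriptors to hardware by ringing the TxQ doorbell */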
3095 bna_txq_prod_indx_doorbell(tcb);
3097 return NETDEV_TX_OK;
3101 * Uses spin_lock to synchronize reading of the stats structures, which
3102 * are written by BNA under the same lock.
3105 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3107 struct bnad *bnad = netdev_priv(netdev);
3108 unsigned long flags;
3110 spin_lock_irqsave(&bnad->bna_lock, flags);
3112 bnad_netdev_qstats_fill(bnad, stats);
3113 bnad_netdev_hwstats_fill(bnad, stats);
3115 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3119 bnad_set_rx_ucast_fltr(struct bnad *bnad)
3121 struct net_device *netdev = bnad->netdev;
3122 int uc_count = netdev_uc_count(netdev);
3123 enum bna_cb_status ret;
3125 struct netdev_hw_addr *ha;
3128 if (netdev_uc_empty(bnad->netdev)) {
3129 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3133 if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3136 mac_list = kcalloc(ETH_ALEN, uc_count, GFP_ATOMIC);
3137 if (mac_list == NULL)
3141 netdev_for_each_uc_addr(ha, netdev) {
3142 ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
3146 ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
3149 if (ret != BNA_CB_SUCCESS)
3154 /* ucast packets not in UCAM are routed to default function */
3156 bnad->cfg_flags |= BNAD_CF_DEFAULT;
3157 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3161 bnad_set_rx_mcast_fltr(struct bnad *bnad)
3163 struct net_device *netdev = bnad->netdev;
3164 int mc_count = netdev_mc_count(netdev);
3165 enum bna_cb_status ret;
3168 if (netdev->flags & IFF_ALLMULTI)
3171 if (netdev_mc_empty(netdev))
3174 if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3177 mac_list = kcalloc(mc_count + 1, ETH_ALEN, GFP_ATOMIC);
3179 if (mac_list == NULL)
3182 ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
3184 /* copy rest of the MCAST addresses */
3185 bnad_netdev_mc_list_get(netdev, mac_list);
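	/* Program the broadcast + multicast list into the device;
	 * fall back to ALLMULTI on failure
	 */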
3186 ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
3189 if (ret != BNA_CB_SUCCESS)
3195 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3196 bna_rx_mcast_delall(bnad->rx_info[0].rx);
3200 bnad_set_rx_mode(struct net_device *netdev)
3202 struct bnad *bnad = netdev_priv(netdev);
3203 enum bna_rxmode new_mode, mode_mask;
3204 unsigned long flags;
3206 spin_lock_irqsave(&bnad->bna_lock, flags);
3208 if (bnad->rx_info[0].rx == NULL) {
3209 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3213 /* clear bnad flags and recompute them from the new settings */
3214 bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3218 if (netdev->flags & IFF_PROMISC) {
3219 new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3220 bnad->cfg_flags |= BNAD_CF_PROMISC;
3222 bnad_set_rx_mcast_fltr(bnad);
3224 if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3225 new_mode |= BNA_RXMODE_ALLMULTI;
3227 bnad_set_rx_ucast_fltr(bnad);
3229 if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3230 new_mode |= BNA_RXMODE_DEFAULT;
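	/* Apply the accumulated receive-mode bits in one call, limited to the modes this driver manages */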
3233 mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3234 BNA_RXMODE_ALLMULTI;
3235 bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
3237 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3241 * bna_lock is used to sync writes to netdev->addr
3242 * conf_lock cannot be used since this call may be made
3243 * in a non-blocking context.
3246 bnad_set_mac_address(struct net_device *netdev, void *addr)
3249 struct bnad *bnad = netdev_priv(netdev);
3250 struct sockaddr *sa = (struct sockaddr *)addr;
3251 unsigned long flags;
3253 spin_lock_irqsave(&bnad->bna_lock, flags);
3255 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3257 ether_addr_copy(netdev->dev_addr, sa->sa_data);
3259 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3265 bnad_mtu_set(struct bnad *bnad, int frame_size)
3267 unsigned long flags;
3269 init_completion(&bnad->bnad_completions.mtu_comp);
3271 spin_lock_irqsave(&bnad->bna_lock, flags);
3272 bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3273 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3275 wait_for_completion(&bnad->bnad_completions.mtu_comp);
3277 return bnad->bnad_completions.mtu_comp_status;
3281 bnad_change_mtu(struct net_device *netdev, int new_mtu)
3284 struct bnad *bnad = netdev_priv(netdev);
3285 u32 rx_count = 0, frame, new_frame;
3287 mutex_lock(&bnad->conf_mutex);
3290 netdev->mtu = new_mtu;
3292 frame = BNAD_FRAME_SIZE(mtu);
3293 new_frame = BNAD_FRAME_SIZE(new_mtu);
3295 /* check if multi-buffer needs to be enabled */
3296 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3297 netif_running(bnad->netdev)) {
3298 /* only when the frame size crosses the 4K boundary */
3299 if ((frame <= 4096 && new_frame > 4096) ||
3300 (frame > 4096 && new_frame <= 4096))
3301 rx_count = bnad_reinit_rx(bnad);
3304 /* rx_count > 0 - a new Rx was created;
3305 * Linux expects err = 0 to be returned in that case
3307 err = bnad_mtu_set(bnad, new_frame);
3311 mutex_unlock(&bnad->conf_mutex);
3316 bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3318 struct bnad *bnad = netdev_priv(netdev);
3319 unsigned long flags;
3321 if (!bnad->rx_info[0].rx)
3324 mutex_lock(&bnad->conf_mutex);
3326 spin_lock_irqsave(&bnad->bna_lock, flags);
3327 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3328 set_bit(vid, bnad->active_vlans);
3329 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3331 mutex_unlock(&bnad->conf_mutex);
3337 bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3339 struct bnad *bnad = netdev_priv(netdev);
3340 unsigned long flags;
3342 if (!bnad->rx_info[0].rx)
3345 mutex_lock(&bnad->conf_mutex);
3347 spin_lock_irqsave(&bnad->bna_lock, flags);
3348 clear_bit(vid, bnad->active_vlans);
3349 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3350 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3352 mutex_unlock(&bnad->conf_mutex);
3357 static int bnad_set_features(struct net_device *dev, netdev_features_t features)
3359 struct bnad *bnad = netdev_priv(dev);
3360 netdev_features_t changed = features ^ dev->features;
3362 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
3363 unsigned long flags;
3365 spin_lock_irqsave(&bnad->bna_lock, flags);
3367 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3368 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3370 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3372 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3378 #ifdef CONFIG_NET_POLL_CONTROLLER
3380 bnad_netpoll(struct net_device *netdev)
3382 struct bnad *bnad = netdev_priv(netdev);
3383 struct bnad_rx_info *rx_info;
3384 struct bnad_rx_ctrl *rx_ctrl;
3388 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3389 bna_intx_disable(&bnad->bna, curr_mask);
3390 bnad_isr(bnad->pcidev->irq, netdev);
3391 bna_intx_enable(&bnad->bna, curr_mask);
3394 * Tx processing may happen in sending context, so no need
3395 * to explicitly process completions here
3399 for (i = 0; i < bnad->num_rx; i++) {
3400 rx_info = &bnad->rx_info[i];
3403 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3404 rx_ctrl = &rx_info->rx_ctrl[j];
3406 bnad_netif_rx_schedule_poll(bnad,
3414 static const struct net_device_ops bnad_netdev_ops = {
3415 .ndo_open = bnad_open,
3416 .ndo_stop = bnad_stop,
3417 .ndo_start_xmit = bnad_start_xmit,
3418 .ndo_get_stats64 = bnad_get_stats64,
3419 .ndo_set_rx_mode = bnad_set_rx_mode,
3420 .ndo_validate_addr = eth_validate_addr,
3421 .ndo_set_mac_address = bnad_set_mac_address,
3422 .ndo_change_mtu = bnad_change_mtu,
3423 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3424 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3425 .ndo_set_features = bnad_set_features,
3426 #ifdef CONFIG_NET_POLL_CONTROLLER
3427 .ndo_poll_controller = bnad_netpoll
3432 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3434 struct net_device *netdev = bnad->netdev;
3436 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3437 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3438 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
3439 NETIF_F_HW_VLAN_CTAG_RX;
3441 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3442 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3443 NETIF_F_TSO | NETIF_F_TSO6;
3445 netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
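	/* Advertise HIGHDMA only when 64-bit DMA addressing (using_dac) was set up */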
3448 netdev->features |= NETIF_F_HIGHDMA;
3450 netdev->mem_start = bnad->mmio_start;
3451 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3453 /* MTU range: 46 - 9000 */
3454 netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
3455 netdev->max_mtu = BNAD_JUMBO_MTU;
3457 netdev->netdev_ops = &bnad_netdev_ops;
3458 bnad_set_ethtool_ops(netdev);
3462 * 1. Initialize the bnad structure
3463 * 2. Setup netdev pointer in pci_dev
3464 * 3. Initialize no. of TxQ & CQs & MSIX vectors
3465 * 4. Initialize work queue.
3468 bnad_init(struct bnad *bnad,
3469 struct pci_dev *pdev, struct net_device *netdev)
3471 unsigned long flags;
3473 SET_NETDEV_DEV(netdev, &pdev->dev);
3474 pci_set_drvdata(pdev, netdev);
3476 bnad->netdev = netdev;
3477 bnad->pcidev = pdev;
3478 bnad->mmio_start = pci_resource_start(pdev, 0);
3479 bnad->mmio_len = pci_resource_len(pdev, 0);
3480 bnad->bar0 = ioremap(bnad->mmio_start, bnad->mmio_len);
3482 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3485 dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
3486 (unsigned long long) bnad->mmio_len);
3488 spin_lock_irqsave(&bnad->bna_lock, flags);
3489 if (!bnad_msix_disable)
3490 bnad->cfg_flags = BNAD_CF_MSIX;
3492 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3494 bnad_q_num_init(bnad);
3495 spin_unlock_irqrestore(&bnad->bna_lock, flags);
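	/* One MSI-X vector per TxQ and per Rx path, plus one for the mailbox */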
3497 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3498 (bnad->num_rx * bnad->num_rxp_per_rx) +
3499 BNAD_MAILBOX_MSIX_VECTORS;
3501 bnad->txq_depth = BNAD_TXQ_DEPTH;
3502 bnad->rxq_depth = BNAD_RXQ_DEPTH;
3504 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3505 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3507 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3508 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3509 if (!bnad->work_q) {
3510 iounmap(bnad->bar0);
3518 * Must be called after bnad_pci_uninit()
3519 * so that iounmap() and pci_set_drvdata(NULL)
3520 * happen only after PCI uninitialization.
3523 bnad_uninit(struct bnad *bnad)
3526 flush_workqueue(bnad->work_q);
3527 destroy_workqueue(bnad->work_q);
3528 bnad->work_q = NULL;
3532 iounmap(bnad->bar0);
3537 a) Per-ioceth mutex used for serializing configuration
3538 changes from the OS interface
3539 b) spin lock used to protect bna state machine
3542 bnad_lock_init(struct bnad *bnad)
3544 spin_lock_init(&bnad->bna_lock);
3545 mutex_init(&bnad->conf_mutex);
3549 bnad_lock_uninit(struct bnad *bnad)
3551 mutex_destroy(&bnad->conf_mutex);
3554 /* PCI Initialization */
3556 bnad_pci_init(struct bnad *bnad,
3557 struct pci_dev *pdev, bool *using_dac)
3561 err = pci_enable_device(pdev);
3564 err = pci_request_regions(pdev, BNAD_NAME);
3566 goto disable_device;
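	/* Prefer a 64-bit DMA mask (using_dac = true); otherwise fall back to 32-bit addressing */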
3567 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3570 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3572 goto release_regions;
3575 pci_set_master(pdev);
3579 pci_release_regions(pdev);
3581 pci_disable_device(pdev);
3587 bnad_pci_uninit(struct pci_dev *pdev)
3589 pci_release_regions(pdev);
3590 pci_disable_device(pdev);
3594 bnad_pci_probe(struct pci_dev *pdev,
3595 const struct pci_device_id *pcidev_id)
3601 struct net_device *netdev;
3602 struct bfa_pcidev pcidev_info;
3603 unsigned long flags;
3605 mutex_lock(&bnad_fwimg_mutex);
3606 if (!cna_get_firmware_buf(pdev)) {
3607 mutex_unlock(&bnad_fwimg_mutex);
3608 dev_err(&pdev->dev, "failed to load firmware image!\n");
3611 mutex_unlock(&bnad_fwimg_mutex);
3614 * Allocates a net_device with sizeof(struct bnad) of private data
3615 * bnad = netdev_priv(netdev)
3617 netdev = alloc_etherdev(sizeof(struct bnad));
3622 bnad = netdev_priv(netdev);
3623 bnad_lock_init(bnad);
3624 bnad->id = atomic_inc_return(&bna_id) - 1;
3626 mutex_lock(&bnad->conf_mutex);
3628 * PCI initialization
3629 * Output : using_dac = 1 for 64 bit DMA
3630 * = 0 for 32 bit DMA
3633 err = bnad_pci_init(bnad, pdev, &using_dac);
3638 * Initialize bnad structure
3639 * Setup relation between pci_dev & netdev
3641 err = bnad_init(bnad, pdev, netdev);
3645 /* Initialize netdev structure, set up ethtool ops */
3646 bnad_netdev_init(bnad, using_dac);
3648 /* Set link to down state */
3649 netif_carrier_off(netdev);
3651 /* Set up the debugfs node for this bnad */
3652 if (bna_debugfs_enable)
3653 bnad_debugfs_init(bnad);
3655 /* Get resource requirements from bna */
3656 spin_lock_irqsave(&bnad->bna_lock, flags);
3657 bna_res_req(&bnad->res_info[0]);
3658 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3660 /* Allocate resources from bna */
3661 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3667 /* Setup pcidev_info for bna_init() */
3668 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3669 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3670 pcidev_info.device_id = bnad->pcidev->device;
3671 pcidev_info.pci_bar_kva = bnad->bar0;
3673 spin_lock_irqsave(&bnad->bna_lock, flags);
3674 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3675 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3677 bnad->stats.bna_stats = &bna->stats;
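	/* Set up interrupt resources: try to enable MSI-X, then request the mailbox IRQ */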
3679 bnad_enable_msix(bnad);
3680 err = bnad_mbox_irq_alloc(bnad);
3685 timer_setup(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, 0);
3686 timer_setup(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, 0);
3687 timer_setup(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, 0);
3688 timer_setup(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3693 * If the callback comes back with an error, we bail out.
3694 * This is a catastrophic error.
3696 err = bnad_ioceth_enable(bnad);
3698 dev_err(&pdev->dev, "initialization failed err=%d\n", err);
3702 spin_lock_irqsave(&bnad->bna_lock, flags);
3703 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3704 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3705 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3706 bna_attr(bna)->num_rxp - 1);
3707 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3708 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3711 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3713 goto disable_ioceth;
3715 spin_lock_irqsave(&bnad->bna_lock, flags);
3716 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3717 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3719 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3722 goto disable_ioceth;
3725 spin_lock_irqsave(&bnad->bna_lock, flags);
3726 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3727 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3729 /* Get the burnt-in mac */
3730 spin_lock_irqsave(&bnad->bna_lock, flags);
3731 bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
3732 bnad_set_netdev_perm_addr(bnad);
3733 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3735 mutex_unlock(&bnad->conf_mutex);
3737 /* Finally, register with the net_device layer */
3738 err = register_netdev(netdev);
3740 dev_err(&pdev->dev, "registering net device failed\n");
3743 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3748 mutex_unlock(&bnad->conf_mutex);
3752 mutex_lock(&bnad->conf_mutex);
3753 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3755 bnad_ioceth_disable(bnad);
3756 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3757 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3758 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3759 spin_lock_irqsave(&bnad->bna_lock, flags);
3761 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3762 bnad_mbox_irq_free(bnad);
3763 bnad_disable_msix(bnad);
3765 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3767 /* Remove the debugfs node for this bnad */
3768 kfree(bnad->regdata);
3769 bnad_debugfs_uninit(bnad);
3772 bnad_pci_uninit(pdev);
3774 mutex_unlock(&bnad->conf_mutex);
3775 bnad_lock_uninit(bnad);
3776 free_netdev(netdev);
3781 bnad_pci_remove(struct pci_dev *pdev)
3783 struct net_device *netdev = pci_get_drvdata(pdev);
3786 unsigned long flags;
3791 bnad = netdev_priv(netdev);
3794 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3795 unregister_netdev(netdev);
3797 mutex_lock(&bnad->conf_mutex);
3798 bnad_ioceth_disable(bnad);
3799 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3800 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3801 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3802 spin_lock_irqsave(&bnad->bna_lock, flags);
3804 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3806 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3807 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3808 bnad_mbox_irq_free(bnad);
3809 bnad_disable_msix(bnad);
3810 bnad_pci_uninit(pdev);
3811 mutex_unlock(&bnad->conf_mutex);
3812 bnad_lock_uninit(bnad);
3813 /* Remove the debugfs node for this bnad */
3814 kfree(bnad->regdata);
3815 bnad_debugfs_uninit(bnad);
3817 free_netdev(netdev);
3820 static const struct pci_device_id bnad_pci_id_table[] = {
3822 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3823 PCI_DEVICE_ID_BROCADE_CT),
3824 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3825 .class_mask = 0xffff00
3828 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3829 BFA_PCI_DEVICE_ID_CT2),
3830 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3831 .class_mask = 0xffff00
3836 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3838 static struct pci_driver bnad_pci_driver = {
3840 .id_table = bnad_pci_id_table,
3841 .probe = bnad_pci_probe,
3842 .remove = bnad_pci_remove,
3846 bnad_module_init(void)
3850 pr_info("bna: QLogic BR-series 10G Ethernet driver - version: %s\n",
3853 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3855 err = pci_register_driver(&bnad_pci_driver);
3857 pr_err("bna: PCI driver registration failed err=%d\n", err);
3865 bnad_module_exit(void)
3867 pci_unregister_driver(&bnad_pci_driver);
3868 release_firmware(bfi_fw);
3871 module_init(bnad_module_init);
3872 module_exit(bnad_module_exit);
3874 MODULE_AUTHOR("Brocade");
3875 MODULE_LICENSE("GPL");
3876 MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
3877 MODULE_VERSION(BNAD_VERSION);
3878 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3879 MODULE_FIRMWARE(CNA_FW_FILE_CT2);