/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER | */
/* NETIF_MSG_TX_QUEUED | */
/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1; /* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
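/*
 * Illustrative usage sketch, not part of the driver: both parameters
 * above are ordinary module parameters, so they can be set at load
 * time, e.g.
 *
 *	modprobe qlge debug=0x7 qlge_irq_type=1
 *
 * which would enable the DRV, PROBE and LINK message classes
 * (0x1 | 0x2 | 0x4) and request plain MSI instead of MSI-X.
 */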
static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
        "Option to enable MPI firmware dump. "
        "Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
        "Option to allow force of firmware core dump. "
        "Default is OFF - Do not allow.");
static const struct pci_device_id qlge_pci_tbl[] = {
    {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
    {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
    /* required last entry */

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
static int ql_wol(struct ql_adapter *);
static void qlge_set_multicast_list(struct net_device *);
static int ql_adapter_down(struct ql_adapter *);
static int ql_adapter_up(struct ql_adapter *);
/* This hardware semaphore enforces exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)

    case SEM_XGMAC0_MASK:
        sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
    case SEM_XGMAC1_MASK:
        sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
        sem_bits = SEM_SET << SEM_ICB_SHIFT;
    case SEM_MAC_ADDR_MASK:
        sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
        sem_bits = SEM_SET << SEM_FLASH_SHIFT;
        sem_bits = SEM_SET << SEM_PROBE_SHIFT;
    case SEM_RT_IDX_MASK:
        sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
    case SEM_PROC_REG_MASK:
        sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
        netif_alert(qdev, probe, qdev->ndev, "bad semaphore mask!\n");

    ql_write32(qdev, SEM, sem_bits | sem_mask);
    return !(ql_read32(qdev, SEM) & sem_bits);
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)

    unsigned int wait_count = 30;

        if (!ql_sem_trylock(qdev, sem_mask))
    } while (--wait_count);

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)

    ql_write32(qdev, SEM, sem_mask);
    ql_read32(qdev, SEM); /* flush */
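/*
 * Illustrative usage sketch, not part of the driver: callers bracket
 * access to a shared resource with the two helpers above, as the flash
 * routines later in this file do with SEM_FLASH_MASK.
 * ql_sem_spinlock() returns non-zero on timeout.
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	// ... access the flash registers ...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */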
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread APIs such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)

    for (count = 0; count < UDELAY_COUNT; count++) {
        temp = ql_read32(qdev, reg);

        /* check for errors */
        if (temp & err_bit) {
            netif_alert(qdev, probe, qdev->ndev,
                    "register 0x%.08x access error, value = 0x%.08x!\n",
        } else if (temp & bit)
        udelay(UDELAY_DELAY);
    netif_alert(qdev, probe, qdev->ndev,
            "Timed out waiting for reg %x to come ready.\n", reg);
/* The CFG register is used to download TX and RX control blocks
 * to the chip.  This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)

    for (count = 0; count < UDELAY_COUNT; count++) {
        temp = ql_read32(qdev, CFG);
        udelay(UDELAY_DELAY);

/* Used to issue init control blocks to hw.  Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,

        (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :

    map = pci_map_single(qdev->pdev, ptr, size, direction);
    if (pci_dma_mapping_error(qdev->pdev, map)) {
        netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");

    status = ql_sem_spinlock(qdev, SEM_ICB_MASK);

    status = ql_wait_cfg(qdev, bit);
        netif_err(qdev, ifup, qdev->ndev,
                "Timed out waiting for CFG to come ready.\n");

    ql_write32(qdev, ICB_L, (u32) map);
    ql_write32(qdev, ICB_H, (u32) (map >> 32));

    mask = CFG_Q_MASK | (bit << 16);
    value = bit | (q_id << CFG_Q_SHIFT);
    ql_write32(qdev, CFG, (mask | value));

    /*
     * Wait for the bit to clear after signaling hw.
     */
    status = ql_wait_cfg(qdev, bit);

    ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
    pci_unmap_single(qdev->pdev, map, size, direction);
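/*
 * Illustrative usage sketch, not part of this excerpt: elsewhere in the
 * driver a completion queue initialization control block (cqicb) is
 * downloaded to the chip along these lines:
 *
 *	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
 *			   CFG_LCQ, rx_ring->cq_id);
 *
 * i.e. the caller supplies only the block, its size, the CFG load bit
 * and the queue id; mapping, download trigger and the completion wait
 * all happen inside ql_write_cfg().
 */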
/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,

    case MAC_ADDR_TYPE_MULTI_MAC:
    case MAC_ADDR_TYPE_CAM_MAC:

            ql_wait_reg_rdy(qdev,
                    MAC_ADDR_IDX, MAC_ADDR_MW, 0);

        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                (index << MAC_ADDR_IDX_SHIFT) | /* index */
                MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */

            ql_wait_reg_rdy(qdev,
                    MAC_ADDR_IDX, MAC_ADDR_MR, 0);

        *value++ = ql_read32(qdev, MAC_ADDR_DATA);

            ql_wait_reg_rdy(qdev,
                    MAC_ADDR_IDX, MAC_ADDR_MW, 0);

        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                (index << MAC_ADDR_IDX_SHIFT) | /* index */
                MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */

            ql_wait_reg_rdy(qdev,
                    MAC_ADDR_IDX, MAC_ADDR_MR, 0);

        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
        if (type == MAC_ADDR_TYPE_CAM_MAC) {

                ql_wait_reg_rdy(qdev,
                        MAC_ADDR_IDX, MAC_ADDR_MW, 0);

            ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */

            ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,

            *value++ = ql_read32(qdev, MAC_ADDR_DATA);

    case MAC_ADDR_TYPE_VLAN:
    case MAC_ADDR_TYPE_MULTI_FLTR:

        netif_crit(qdev, ifup, qdev->ndev,
                "Address type %d not yet supported.\n", type);
/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,

    case MAC_ADDR_TYPE_MULTI_MAC:

        u32 upper = (addr[0] << 8) | addr[1];
        u32 lower = (addr[2] << 24) | (addr[3] << 16) |
                (addr[4] << 8) | (addr[5]);

            ql_wait_reg_rdy(qdev,
                    MAC_ADDR_IDX, MAC_ADDR_MW, 0);

        ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
                (index << MAC_ADDR_IDX_SHIFT) |
        ql_write32(qdev, MAC_ADDR_DATA, lower);

            ql_wait_reg_rdy(qdev,
                    MAC_ADDR_IDX, MAC_ADDR_MW, 0);

        ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
                (index << MAC_ADDR_IDX_SHIFT) |
        ql_write32(qdev, MAC_ADDR_DATA, upper);

            ql_wait_reg_rdy(qdev,
                    MAC_ADDR_IDX, MAC_ADDR_MW, 0);

    case MAC_ADDR_TYPE_CAM_MAC:

        u32 upper = (addr[0] << 8) | addr[1];
            (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |

            ql_wait_reg_rdy(qdev,
                    MAC_ADDR_IDX, MAC_ADDR_MW, 0);

        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                (index << MAC_ADDR_IDX_SHIFT) | /* index */
        ql_write32(qdev, MAC_ADDR_DATA, lower);

            ql_wait_reg_rdy(qdev,
                    MAC_ADDR_IDX, MAC_ADDR_MW, 0);

        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                (index << MAC_ADDR_IDX_SHIFT) | /* index */
        ql_write32(qdev, MAC_ADDR_DATA, upper);

            ql_wait_reg_rdy(qdev,
                    MAC_ADDR_IDX, MAC_ADDR_MW, 0);

        ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
                (index << MAC_ADDR_IDX_SHIFT) | /* index */
        /* This field should also include the queue id
         * and possibly the function id.  Right now we hardcode
         * the route field to NIC core.
         */
        cam_output = (CAM_OUT_ROUTE_NIC |
                (qdev->func << CAM_OUT_FUNC_SHIFT) |
                (0 << CAM_OUT_CQ_ID_SHIFT));
        if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
            cam_output |= CAM_OUT_RV;
        /* route to NIC core */
        ql_write32(qdev, MAC_ADDR_DATA, cam_output);

    case MAC_ADDR_TYPE_VLAN:

        u32 enable_bit = *((u32 *) &addr[0]);
        /* For VLAN, the addr actually holds a bit that
         * either enables or disables the vlan id we are
         * addressing.  It's either MAC_ADDR_E on or off.
         * That's bit-27 we're talking about.
         */
            ql_wait_reg_rdy(qdev,
                    MAC_ADDR_IDX, MAC_ADDR_MW, 0);

        ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
                (index << MAC_ADDR_IDX_SHIFT) | /* index */
                enable_bit); /* enable/disable */

    case MAC_ADDR_TYPE_MULTI_FLTR:

        netif_crit(qdev, ifup, qdev->ndev,
                "Address type %d not yet supported.\n", type);
/* Set or clear MAC address in hardware.  We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)

    char zero_mac_addr[ETH_ALEN];

        addr = &qdev->current_mac_addr[0];
        netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
                "Set Mac addr %pM\n", addr);
        eth_zero_addr(zero_mac_addr);
        addr = &zero_mac_addr[0];
        netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
                "Clearing MAC address\n");
    status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
    status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
            MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
    ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
        netif_err(qdev, ifup, qdev->ndev,
                "Failed to init mac address.\n");
void ql_link_on(struct ql_adapter *qdev)

    netif_err(qdev, link, qdev->ndev, "Link is up.\n");
    netif_carrier_on(qdev->ndev);
    ql_set_mac_addr(qdev, 1);

void ql_link_off(struct ql_adapter *qdev)

    netif_err(qdev, link, qdev->ndev, "Link is down.\n");
    netif_carrier_off(qdev->ndev);
    ql_set_mac_addr(qdev, 0);
/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)

    status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);

    ql_write32(qdev, RT_IDX,
            RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
    status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);

    *value = ql_read32(qdev, RT_DATA);
/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,

    int status = -EINVAL; /* Return error if no mask match. */

        value = RT_IDX_DST_CAM_Q | /* dest */
            RT_IDX_TYPE_NICQ | /* type */
            (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT); /* index */

    case RT_IDX_VALID: /* Promiscuous Mode frames. */
        value = RT_IDX_DST_DFLT_Q | /* dest */
            RT_IDX_TYPE_NICQ | /* type */
            (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT); /* index */

    case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
        value = RT_IDX_DST_DFLT_Q | /* dest */
            RT_IDX_TYPE_NICQ | /* type */
            (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */

    case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
        value = RT_IDX_DST_DFLT_Q | /* dest */
            RT_IDX_TYPE_NICQ | /* type */
            (RT_IDX_IP_CSUM_ERR_SLOT <<
             RT_IDX_IDX_SHIFT); /* index */

    case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
        value = RT_IDX_DST_DFLT_Q | /* dest */
            RT_IDX_TYPE_NICQ | /* type */
            (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
             RT_IDX_IDX_SHIFT); /* index */

    case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
        value = RT_IDX_DST_DFLT_Q | /* dest */
            RT_IDX_TYPE_NICQ | /* type */
            (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT); /* index */

    case RT_IDX_MCAST: /* Pass up All Multicast frames. */
        value = RT_IDX_DST_DFLT_Q | /* dest */
            RT_IDX_TYPE_NICQ | /* type */
            (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT); /* index */

    case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
        value = RT_IDX_DST_DFLT_Q | /* dest */
            RT_IDX_TYPE_NICQ | /* type */
            (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */

    case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
        value = RT_IDX_DST_RSS | /* dest */
            RT_IDX_TYPE_NICQ | /* type */
            (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */

    case 0: /* Clear the E-bit on an entry. */
        value = RT_IDX_DST_DFLT_Q | /* dest */
            RT_IDX_TYPE_NICQ | /* type */
            (index << RT_IDX_IDX_SHIFT); /* index */

        netif_err(qdev, ifup, qdev->ndev,
                "Mask type %d not yet supported.\n", mask);

    status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);

    value |= (enable ? RT_IDX_E : 0);
    ql_write32(qdev, RT_IDX, value);
    ql_write32(qdev, RT_DATA, enable ? mask : 0);
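/*
 * Illustrative usage sketch, not part of this excerpt: the multicast
 * path elsewhere in the driver enables the broadcast slot roughly like
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
 *				    RT_IDX_BCAST, 1);
 *
 * and passing enable == 0 clears the E-bit again, so the same helper
 * both installs and removes a routing rule.
 */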
static void ql_enable_interrupts(struct ql_adapter *qdev)

    ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);

static void ql_disable_interrupts(struct ql_adapter *qdev)

    ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));

static void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)

    struct intr_context *ctx = &qdev->intr_context[intr];

    ql_write32(qdev, INTR_EN, ctx->intr_en_mask);

static void ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)

    struct intr_context *ctx = &qdev->intr_context[intr];

    ql_write32(qdev, INTR_EN, ctx->intr_dis_mask);

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)

    for (i = 0; i < qdev->intr_count; i++)
        ql_enable_completion_interrupt(qdev, i);
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)

    __le16 *flash = (__le16 *)&qdev->flash;

    status = strncmp((char *)&qdev->flash, str, 4);
        netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");

    for (i = 0; i < size; i++)
        csum += le16_to_cpu(*flash++);

        netif_err(qdev, ifup, qdev->ndev,
                "Invalid flash checksum, csum = 0x%.04x.\n", csum);
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)

    /* wait for reg to come ready */
    status = ql_wait_reg_rdy(qdev,
            FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);

    /* set up for reg read */
    ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);

    /* wait for reg to come ready */
    status = ql_wait_reg_rdy(qdev,
            FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);

    /* This data is stored on flash as an array of
     * __le32.  Since ql_read32() returns cpu endian
     * we need to swap it back.
     */
    *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
static int ql_get_8000_flash_params(struct ql_adapter *qdev)

    __le32 *p = (__le32 *)&qdev->flash;

    /* Get flash offset for function and adjust
     * for dword access.
     */
        offset = FUNC0_FLASH_OFFSET / sizeof(u32);
        offset = FUNC1_FLASH_OFFSET / sizeof(u32);

    if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))

    size = sizeof(struct flash_params_8000) / sizeof(u32);
    for (i = 0; i < size; i++, p++) {
        status = ql_read_flash_word(qdev, i + offset, p);
            netif_err(qdev, ifup, qdev->ndev,
                    "Error reading flash.\n");

    status = ql_validate_flash(qdev,
            sizeof(struct flash_params_8000) / sizeof(u16),
        netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");

    /* Extract either the manufacturer or the BOFM-modified
     * MAC address.
     */
    if (qdev->flash.flash_params_8000.data_type1 == 2)
            qdev->flash.flash_params_8000.mac_addr1,
            qdev->ndev->addr_len);
            qdev->flash.flash_params_8000.mac_addr,
            qdev->ndev->addr_len);

    if (!is_valid_ether_addr(mac_addr)) {
        netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");

    memcpy(qdev->ndev->dev_addr,
            qdev->ndev->addr_len);

    ql_sem_unlock(qdev, SEM_FLASH_MASK);
static int ql_get_8012_flash_params(struct ql_adapter *qdev)

    __le32 *p = (__le32 *)&qdev->flash;
    u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

    /* Second function's parameters follow the first
     * function's.
     */
    if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))

    for (i = 0; i < size; i++, p++) {
        status = ql_read_flash_word(qdev, i + offset, p);
            netif_err(qdev, ifup, qdev->ndev,
                    "Error reading flash.\n");

    status = ql_validate_flash(qdev,
            sizeof(struct flash_params_8012) / sizeof(u16),
        netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");

    if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {

    memcpy(qdev->ndev->dev_addr,
            qdev->flash.flash_params_8012.mac_addr,
            qdev->ndev->addr_len);

    ql_sem_unlock(qdev, SEM_FLASH_MASK);
/* The xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)

    /* wait for reg to come ready */
    status = ql_wait_reg_rdy(qdev,
            XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);

    /* write the data to the data reg */
    ql_write32(qdev, XGMAC_DATA, data);
    /* trigger the write */
    ql_write32(qdev, XGMAC_ADDR, reg);

/* The xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)

    /* wait for reg to come ready */
    status = ql_wait_reg_rdy(qdev,
            XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);

    /* set up for reg read */
    ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);

    /* wait for reg to come ready */
    status = ql_wait_reg_rdy(qdev,
            XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);

    *data = ql_read32(qdev, XGMAC_DATA);
/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)

    status = ql_read_xgmac_reg(qdev, reg, &lo);

    status = ql_read_xgmac_reg(qdev, reg + 4, &hi);

    *data = (u64) lo | ((u64) hi << 32);
static int ql_8000_port_initialize(struct ql_adapter *qdev)

    /*
     * Get MPI firmware version for driver banner
     * and ethtool info.
     */
    status = ql_mb_about_fw(qdev);
    status = ql_mb_get_fw_state(qdev);

    /* Wake up a worker to get/set the TX/RX frame sizes. */
    queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)

    if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
        /* Another function has the semaphore, so
         * wait for the port init bit to come ready.
         */
        netif_info(qdev, link, qdev->ndev,
                "Another function has the semaphore, so wait for the port init bit to come ready.\n");
        status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
            netif_crit(qdev, link, qdev->ndev,
                    "Port initialize timed out.\n");

    netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
    /* Set the core reset. */
    status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
    data |= GLOBAL_CFG_RESET;
    status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);

    /* Clear the core reset and turn on jumbo for receiver. */
    data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
    data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
    data |= GLOBAL_CFG_TX_STAT_EN;
    data |= GLOBAL_CFG_RX_STAT_EN;
    status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);

    /* Enable transmitter, and clear its reset. */
    status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
    data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
    data |= TX_CFG_EN; /* Enable the transmitter. */
    status = ql_write_xgmac_reg(qdev, TX_CFG, data);

    /* Enable receiver and clear its reset. */
    status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
    data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
    data |= RX_CFG_EN; /* Enable the receiver. */
    status = ql_write_xgmac_reg(qdev, RX_CFG, data);

    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS,
            MAC_TX_PARAMS_JUMBO | (0x2580 << 16));

    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);

    /* Signal to the world that the port is enabled. */
    ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));

    ql_sem_unlock(qdev, qdev->xg_sem_mask);
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)

    return PAGE_SIZE << qdev->lbq_buf_order;
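/*
 * Illustrative arithmetic, not part of the source: large receive
 * buffers are carved out of one "master" compound page.  Assuming a
 * 4 KiB PAGE_SIZE, lbq_buf_order == 1 and a 2 KiB lbq_buf_size, the
 * block size is
 *
 *	4096 << 1 = 8192 bytes,
 *
 * which qlge_refill_lb() below hands out as four 2 KiB chunks before
 * it has to allocate a fresh master page.
 */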
static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)

    struct qlge_bq_desc *bq_desc;

    bq_desc = &bq->queue[bq->next_to_clean];
    bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1);

static struct qlge_bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
        struct rx_ring *rx_ring)

    struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);

    pci_dma_sync_single_for_cpu(qdev->pdev, lbq_desc->dma_addr,
            qdev->lbq_buf_size, PCI_DMA_FROMDEVICE);

    if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
        ql_lbq_block_size(qdev)) {
        /* last chunk of the master page */
        pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
                ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)

    rx_ring->cnsmr_idx++;
    rx_ring->curr_entry++;
    if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
        rx_ring->cnsmr_idx = 0;
        rx_ring->curr_entry = rx_ring->cq_base;

static void ql_write_cq_idx(struct rx_ring *rx_ring)

    ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
static const char * const bq_type_name[] = {

/* return 0 or negative error */
static int qlge_refill_sb(struct rx_ring *rx_ring,
        struct qlge_bq_desc *sbq_desc, gfp_t gfp)

    struct ql_adapter *qdev = rx_ring->qdev;
    struct sk_buff *skb;

    if (sbq_desc->p.skb)

    netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
            "ring %u sbq: getting new skb for index %d.\n",
            rx_ring->cq_id, sbq_desc->index);

    skb = __netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE, gfp);
    skb_reserve(skb, QLGE_SB_PAD);

    sbq_desc->dma_addr = pci_map_single(qdev->pdev, skb->data,
            PCI_DMA_FROMDEVICE);
    if (pci_dma_mapping_error(qdev->pdev, sbq_desc->dma_addr)) {
        netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
        dev_kfree_skb_any(skb);

    *sbq_desc->buf_ptr = cpu_to_le64(sbq_desc->dma_addr);

    sbq_desc->p.skb = skb;
/* return 0 or negative error */
static int qlge_refill_lb(struct rx_ring *rx_ring,
        struct qlge_bq_desc *lbq_desc, gfp_t gfp)

    struct ql_adapter *qdev = rx_ring->qdev;
    struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk;

    if (!master_chunk->page) {
        dma_addr_t dma_addr;

        page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
        if (unlikely(!page))
        dma_addr = pci_map_page(qdev->pdev, page, 0,
                ql_lbq_block_size(qdev),
                PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(qdev->pdev, dma_addr)) {
            __free_pages(page, qdev->lbq_buf_order);
            netif_err(qdev, drv, qdev->ndev,
                    "PCI mapping failed.\n");
        master_chunk->page = page;
        master_chunk->va = page_address(page);
        master_chunk->offset = 0;
        rx_ring->chunk_dma_addr = dma_addr;

    lbq_desc->p.pg_chunk = *master_chunk;
    lbq_desc->dma_addr = rx_ring->chunk_dma_addr;
    *lbq_desc->buf_ptr = cpu_to_le64(lbq_desc->dma_addr +
            lbq_desc->p.pg_chunk.offset);

    /* Adjust the master page chunk for next
     * buffer get.
     */
    master_chunk->offset += qdev->lbq_buf_size;
    if (master_chunk->offset == ql_lbq_block_size(qdev)) {
        master_chunk->page = NULL;
        master_chunk->va += qdev->lbq_buf_size;
        get_page(master_chunk->page);
/* return 0 or negative error */
static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)

    struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
    struct ql_adapter *qdev = rx_ring->qdev;
    struct qlge_bq_desc *bq_desc;

    refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -

    i = bq->next_to_use;
    bq_desc = &bq->queue[i];

        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                "ring %u %s: try cleaning idx %d\n",
                rx_ring->cq_id, bq_type_name[bq->type], i);

        if (bq->type == QLGE_SB)
            retval = qlge_refill_sb(rx_ring, bq_desc, gfp);
            retval = qlge_refill_lb(rx_ring, bq_desc, gfp);
            netif_err(qdev, ifup, qdev->ndev,
                    "ring %u %s: Could not get a page chunk, idx %d\n",
                    rx_ring->cq_id, bq_type_name[bq->type], i);

            bq_desc = &bq->queue[0];
    } while (refill_count);

    if (bq->next_to_use != i) {
        if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                    "ring %u %s: updating prod idx = %d.\n",
                    rx_ring->cq_id, bq_type_name[bq->type],
            ql_write_db_reg(i, bq->prod_idx_db_reg);
        bq->next_to_use = i;
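/*
 * A note on the index arithmetic above (an inference from the code, not
 * from the original comments): the buffer queues behave as power-of-two
 * rings, so QLGE_BQ_WRAP() reduces an index modulo the ring length,
 * i.e. something equivalent to
 *
 *	#define QLGE_BQ_WRAP(idx)  ((idx) & (QLGE_BQ_LEN - 1))
 *
 * while QLGE_BQ_ALIGN() rounds an index down to the batch granularity
 * at which the producer doorbell is rung, so the hardware only ever
 * sees the producer index advance in aligned steps.
 */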
static void ql_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
        unsigned long delay)

    bool sbq_fail, lbq_fail;

    sbq_fail = !!qlge_refill_bq(&rx_ring->sbq, gfp);
    lbq_fail = !!qlge_refill_bq(&rx_ring->lbq, gfp);

    /* Minimum number of buffers needed to be able to receive at least one
     * frame of any format:
     * sbq: 1 for header + 1 for data
     * lbq: mtu 9000 / lb size
     * Below this, the queue might stall.
     */
    if ((sbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->sbq) < 2) ||
        (lbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->lbq) <
         DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE)))
        /* Allocations can take a long time in certain cases (ex.
         * reclaim).  Therefore, use a workqueue for long-running
         * work items.
         */
        queue_delayed_work_on(smp_processor_id(), system_long_wq,
                &rx_ring->refill_work, delay);
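/*
 * Illustrative arithmetic, not part of the source: assuming
 * LARGE_BUFFER_MAX_SIZE == 8192, the lbq floor above is
 *
 *	DIV_ROUND_UP(9000, 8192) = 2,
 *
 * i.e. at least two large buffers must stay hardware-owned so a
 * worst-case 9000-byte jumbo frame can still land.  Only when a refill
 * failure drops a queue below its floor is the slow workqueue path
 * scheduled.
 */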
static void qlge_slow_refill(struct work_struct *work)

    struct rx_ring *rx_ring = container_of(work, struct rx_ring,
    struct napi_struct *napi = &rx_ring->napi;

    ql_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);

    /* napi_disable() might have prevented incomplete napi work from being
     * rescheduled.
     */
    napi_schedule(napi);
    /* trigger softirq processing */
/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
        struct tx_ring_desc *tx_ring_desc, int mapped)

    for (i = 0; i < mapped; i++) {
        if (i == 0 || (i == 7 && mapped > 7)) {
            /*
             * Unmap the skb->data area, or the
             * external sglist (AKA the Outbound
             * Address List (OAL)).
             * If it's the zeroth element, then it's
             * the skb->data area.  If it's the 7th
             * element and there are more than 6 frags,
             * then it's the OAL.
             */
                netif_printk(qdev, tx_done, KERN_DEBUG,
                        "unmapping OAL area.\n");
            pci_unmap_single(qdev->pdev,
                    dma_unmap_addr(&tx_ring_desc->map[i],
                    dma_unmap_len(&tx_ring_desc->map[i],
            netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
                    "unmapping frag %d.\n", i);
            pci_unmap_page(qdev->pdev,
                    dma_unmap_addr(&tx_ring_desc->map[i],
                    dma_unmap_len(&tx_ring_desc->map[i],
                    maplen), PCI_DMA_TODEVICE);
/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
        struct ob_mac_iocb_req *mac_iocb_ptr,
        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)

    int len = skb_headlen(skb);
    int frag_idx, err, map_idx = 0;
    struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
    int frag_cnt = skb_shinfo(skb)->nr_frags;

    netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
            "frag_cnt = %d.\n", frag_cnt);

    /*
     * Map the skb buffer first.
     */
    map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

    err = pci_dma_mapping_error(qdev->pdev, map);
        netif_err(qdev, tx_queued, qdev->ndev,
                "PCI mapping failed with error: %d\n", err);
        return NETDEV_TX_BUSY;

    tbd->len = cpu_to_le32(len);
    tbd->addr = cpu_to_le64(map);
    dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
    dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);

    /*
     * This loop fills the remainder of the 8 address descriptors
     * in the IOCB.  If there are more than 7 fragments, then the
     * eighth address desc will point to an external list (OAL).
     * When this happens, the remainder of the frags will be stored
     * in this list.
     */
    for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
        skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];

        if (frag_idx == 6 && frag_cnt > 7) {
            /* Let's tack on an sglist.
             * Our control block will now
             * look like this:
             * iocb->seg[0] = skb->data
             * iocb->seg[1] = frag[0]
             * iocb->seg[2] = frag[1]
             * iocb->seg[3] = frag[2]
             * iocb->seg[4] = frag[3]
             * iocb->seg[5] = frag[4]
             * iocb->seg[6] = frag[5]
             * iocb->seg[7] = ptr to OAL (external sglist)
             * oal->seg[0] = frag[6]
             * oal->seg[1] = frag[7]
             * oal->seg[2] = frag[8]
             * oal->seg[3] = frag[9]
             * oal->seg[4] = frag[10]
             */

            /* Tack on the OAL in the eighth segment of IOCB. */
            map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,

            err = pci_dma_mapping_error(qdev->pdev, map);
                netif_err(qdev, tx_queued, qdev->ndev,
                        "PCI mapping outbound address list with error: %d\n",

            tbd->addr = cpu_to_le64(map);
            /*
             * The length is the number of fragments
             * that remain to be mapped times the length
             * of our sglist (OAL).
             */
                cpu_to_le32((sizeof(struct tx_buf_desc) *
                        (frag_cnt - frag_idx)) | TX_DESC_C);
            dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
            dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
                    sizeof(struct oal));
            tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;

        map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),

        err = dma_mapping_error(&qdev->pdev->dev, map);
            netif_err(qdev, tx_queued, qdev->ndev,
                    "PCI mapping frags failed with error: %d.\n",

        tbd->addr = cpu_to_le64(map);
        tbd->len = cpu_to_le32(skb_frag_size(frag));
        dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
        dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
                skb_frag_size(frag));

    /* Save the number of segments we've mapped. */
    tx_ring_desc->map_cnt = map_idx;
    /* Terminate the last segment. */
    tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
    return NETDEV_TX_OK;

    /*
     * If the first frag mapping failed, then i will be zero.
     * This causes the unmap of the skb->data area.  Otherwise
     * we pass in the number of frags that mapped successfully
     * so they can be unmapped.
     */
    ql_unmap_send(qdev, tx_ring_desc, map_idx);
    return NETDEV_TX_BUSY;
/* Categorizing receive firmware frame errors */
static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
        struct rx_ring *rx_ring)

    struct nic_stats *stats = &qdev->nic_stats;

    stats->rx_err_count++;
    rx_ring->rx_errors++;

    switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
    case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
        stats->rx_code_err++;
    case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
        stats->rx_oversize_err++;
    case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
        stats->rx_undersize_err++;
    case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
        stats->rx_preamble_err++;
    case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
        stats->rx_frame_len_err++;
    case IB_MAC_IOCB_RSP_ERR_CRC:
        stats->rx_crc_err++;
/**
 * ql_update_mac_hdr_len - helper routine to update the mac header length
 * based on vlan tags if present
 */
static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
        struct ib_mac_iocb_rsp *ib_mac_rsp,
        void *page, size_t *len)

    if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
    if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
        /* Look for stacked vlan tags in ethertype field */
        if (tags[6] == ETH_P_8021Q &&
            tags[8] == ETH_P_8021Q)
            *len += 2 * VLAN_HLEN;
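/*
 * Illustrative arithmetic, not part of the source: with ETH_HLEN == 14
 * and VLAN_HLEN == 4, the MAC header length computed above is
 *
 *	untagged:       14 bytes
 *	single-tagged:  14 + 4     = 18 bytes (the VLAN_HLEN case elided here)
 *	double-tagged:  14 + 2 * 4 = 22 bytes
 *
 * tags[6] and tags[8] index 16-bit words, i.e. byte offsets 12 and 16,
 * which is where the (possibly stacked) 802.1Q ethertypes sit.
 */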
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
        struct rx_ring *rx_ring,
        struct ib_mac_iocb_rsp *ib_mac_rsp,

    struct sk_buff *skb;
    struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
    struct napi_struct *napi = &rx_ring->napi;

    /* Frame error, so drop the packet. */
    if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
        ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
        put_page(lbq_desc->p.pg_chunk.page);

    napi->dev = qdev->ndev;

    skb = napi_get_frags(napi);
        netif_err(qdev, drv, qdev->ndev,
                "Couldn't get an skb, exiting.\n");
        rx_ring->rx_dropped++;
        put_page(lbq_desc->p.pg_chunk.page);

    prefetch(lbq_desc->p.pg_chunk.va);
    __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
            lbq_desc->p.pg_chunk.page,
            lbq_desc->p.pg_chunk.offset,

    skb->data_len += length;
    skb->truesize += length;
    skb_shinfo(skb)->nr_frags++;

    rx_ring->rx_packets++;
    rx_ring->rx_bytes += length;
    skb->ip_summed = CHECKSUM_UNNECESSARY;
    skb_record_rx_queue(skb, rx_ring->cq_id);
    if (vlan_id != 0xffff)
        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
    napi_gro_frags(napi);
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
        struct rx_ring *rx_ring,
        struct ib_mac_iocb_rsp *ib_mac_rsp,

    struct net_device *ndev = qdev->ndev;
    struct sk_buff *skb = NULL;
    struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
    struct napi_struct *napi = &rx_ring->napi;
    size_t hlen = ETH_HLEN;

    skb = netdev_alloc_skb(ndev, length);
        rx_ring->rx_dropped++;
        put_page(lbq_desc->p.pg_chunk.page);

    addr = lbq_desc->p.pg_chunk.va;

    /* Frame error, so drop the packet. */
    if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
        ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);

    /* Update the MAC header length. */
    ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);

    /* The max framesize filter on this chip is set higher than
     * MTU since FCoE uses 2k frames.
     */
    if (skb->len > ndev->mtu + hlen) {
        netif_err(qdev, drv, qdev->ndev,
                "Segment too small, dropping.\n");
        rx_ring->rx_dropped++;

    skb_put_data(skb, addr, hlen);
    netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
            "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
    skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
            lbq_desc->p.pg_chunk.offset + hlen,
    skb->len += length - hlen;
    skb->data_len += length - hlen;
    skb->truesize += length - hlen;

    rx_ring->rx_packets++;
    rx_ring->rx_bytes += skb->len;
    skb->protocol = eth_type_trans(skb, ndev);
    skb_checksum_none_assert(skb);

    if ((ndev->features & NETIF_F_RXCSUM) &&
        !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                    "TCP checksum done!\n");
            skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
                (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
            /* Unfragmented ipv4 UDP frame. */
                (struct iphdr *)((u8 *)addr + hlen);
            if (!(iph->frag_off &
                    htons(IP_MF|IP_OFFSET))) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                netif_printk(qdev, rx_status, KERN_DEBUG,
                        "UDP checksum done!\n");

    skb_record_rx_queue(skb, rx_ring->cq_id);
    if (vlan_id != 0xffff)
        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
    if (skb->ip_summed == CHECKSUM_UNNECESSARY)
        napi_gro_receive(napi, skb);
        netif_receive_skb(skb);

    dev_kfree_skb_any(skb);
    put_page(lbq_desc->p.pg_chunk.page);
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
        struct rx_ring *rx_ring,
        struct ib_mac_iocb_rsp *ib_mac_rsp,

    struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
    struct net_device *ndev = qdev->ndev;
    struct sk_buff *skb, *new_skb;

    skb = sbq_desc->p.skb;
    /* Allocate new_skb and copy */
    new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
        rx_ring->rx_dropped++;
    skb_reserve(new_skb, NET_IP_ALIGN);

    pci_dma_sync_single_for_cpu(qdev->pdev, sbq_desc->dma_addr,
            SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);

    skb_put_data(new_skb, skb->data, length);

    /* Frame error, so drop the packet. */
    if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
        ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
        dev_kfree_skb_any(skb);

    /* loopback self test for ethtool */
    if (test_bit(QL_SELFTEST, &qdev->flags)) {
        ql_check_lb_frame(qdev, skb);
        dev_kfree_skb_any(skb);

    /* The max framesize filter on this chip is set higher than
     * MTU since FCoE uses 2k frames.
     */
    if (skb->len > ndev->mtu + ETH_HLEN) {
        dev_kfree_skb_any(skb);
        rx_ring->rx_dropped++;

    prefetch(skb->data);
    if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
                IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
                (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
                IB_MAC_IOCB_RSP_M_REG ? "Registered" :
                (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
                IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
    if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                "Promiscuous Packet.\n");

    rx_ring->rx_packets++;
    rx_ring->rx_bytes += skb->len;
    skb->protocol = eth_type_trans(skb, ndev);
    skb_checksum_none_assert(skb);

    /* If rx checksum is on, and there are no
     * csum or frame errors.
     */
    if ((ndev->features & NETIF_F_RXCSUM) &&
        !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                    "TCP checksum done!\n");
            skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
                (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
            /* Unfragmented ipv4 UDP frame. */
            struct iphdr *iph = (struct iphdr *) skb->data;

            if (!(iph->frag_off &
                    htons(IP_MF|IP_OFFSET))) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                netif_printk(qdev, rx_status, KERN_DEBUG,
                        "UDP checksum done!\n");

    skb_record_rx_queue(skb, rx_ring->cq_id);
    if (vlan_id != 0xffff)
        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
    if (skb->ip_summed == CHECKSUM_UNNECESSARY)
        napi_gro_receive(&rx_ring->napi, skb);
        netif_receive_skb(skb);
static void ql_realign_skb(struct sk_buff *skb, int len)

    void *temp_addr = skb->data;

    /* Undo the skb_reserve(skb,32) we did before
     * giving to hardware, and realign data on
     * a 2-byte boundary.
     */
    skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
    skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
    memmove(skb->data, temp_addr, len);
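/*
 * Illustrative arithmetic, not part of the source: assuming
 * QLGE_SB_PAD == 32 (the skb_reserve() mentioned above) and
 * NET_IP_ALIGN == 2, the two pointer adjustments move data and tail
 * back by 30 bytes, leaving the usual 2-byte headroom so the IP header
 * that follows the 14-byte Ethernet header lands on a 4-byte boundary.
 */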
/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
        struct rx_ring *rx_ring,
        struct ib_mac_iocb_rsp *ib_mac_rsp)

    u32 length = le32_to_cpu(ib_mac_rsp->data_len);
    u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
    struct qlge_bq_desc *lbq_desc, *sbq_desc;
    struct sk_buff *skb = NULL;
    size_t hlen = ETH_HLEN;

    /*
     * Handle the header buffer if present.
     */
    if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
        ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                "Header of %d bytes in small buffer.\n", hdr_len);
        /*
         * Headers fit nicely into a small buffer.
         */
        sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
        pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
                SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
        skb = sbq_desc->p.skb;
        ql_realign_skb(skb, hdr_len);
        skb_put(skb, hdr_len);
        sbq_desc->p.skb = NULL;

    /*
     * Handle the data buffer(s).
     */
    if (unlikely(!length)) { /* Is there data too? */
        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                "No Data buffer in this packet.\n");

    if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
        if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                    "Headers in small, data of %d bytes in small, combine them.\n",
            /*
             * Data is less than small buffer size so it's
             * stuffed in a small buffer.
             * For this case we append the data
             * from the "data" small buffer to the "header" small
             * buffer.
             */
            sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
            pci_dma_sync_single_for_cpu(qdev->pdev,
                    PCI_DMA_FROMDEVICE);
            skb_put_data(skb, sbq_desc->p.skb->data, length);
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                    "%d bytes in a single small buffer.\n",
            sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
            skb = sbq_desc->p.skb;
            ql_realign_skb(skb, length);
            skb_put(skb, length);
            pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
                    PCI_DMA_FROMDEVICE);
            sbq_desc->p.skb = NULL;
    } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
        if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                    "Header in small, %d bytes in large. Chain large to small!\n",
            /*
             * The data is in a single large buffer.  We
             * chain it to the header buffer's skb and let
             * it rip.
             */
            lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                    "Chaining page at offset = %d, for %d bytes to skb.\n",
                    lbq_desc->p.pg_chunk.offset, length);
            skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
                    lbq_desc->p.pg_chunk.offset,
            skb->data_len += length;
            skb->truesize += length;
            /*
             * The headers and data are in a single large buffer.  We
             * copy it to a new skb and let it go.  This can happen with
             * jumbo mtu on a non-TCP/UDP frame.
             */
            lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
            skb = netdev_alloc_skb(qdev->ndev, length);
                netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
                        "No skb available, drop the packet.\n");
            pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
                    PCI_DMA_FROMDEVICE);
            skb_reserve(skb, NET_IP_ALIGN);
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                    "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
            skb_fill_page_desc(skb, 0,
                    lbq_desc->p.pg_chunk.page,
                    lbq_desc->p.pg_chunk.offset,
            skb->data_len += length;
            skb->truesize += length;
            ql_update_mac_hdr_len(qdev, ib_mac_rsp,
                    lbq_desc->p.pg_chunk.va,
            __pskb_pull_tail(skb, hlen);
        /*
         * The data is in a chain of large buffers
         * pointed to by a small buffer.  We loop
         * thru and chain them to our small header
         * buffer's skb.
         * frags: There are 18 max frags and our small
         *        buffer will hold 32 of them.  The thing is,
         *        we'll use 3 max for our 9000 byte jumbo
         *        frames.  If the MTU goes up we could
         *        eventually be in trouble.
         */
        sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
        pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
                SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
        if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
            /*
             * This is a non-TCP/UDP IP frame, so
             * the headers aren't split into a small
             * buffer.  We have to use the small buffer
             * that contains our sg list as our skb to
             * send upstairs.  Copy the sg list here to
             * a local buffer and use it to find the
             * data pages.
             */
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                    "%d bytes of headers & data in chain of large.\n",
            skb = sbq_desc->p.skb;
            sbq_desc->p.skb = NULL;
            skb_reserve(skb, NET_IP_ALIGN);

            lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
            size = min(length, qdev->lbq_buf_size);

            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                    "Adding page %d to skb for %d bytes.\n",
            skb_fill_page_desc(skb, i,
                    lbq_desc->p.pg_chunk.page,
                    lbq_desc->p.pg_chunk.offset,
            skb->data_len += size;
            skb->truesize += size;
        } while (length > 0);
        ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
        __pskb_pull_tail(skb, hlen);
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
        struct rx_ring *rx_ring,
        struct ib_mac_iocb_rsp *ib_mac_rsp,

    struct net_device *ndev = qdev->ndev;
    struct sk_buff *skb = NULL;

    QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

    skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
    if (unlikely(!skb)) {
        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                "No skb available, drop packet.\n");
        rx_ring->rx_dropped++;

    /* Frame error, so drop the packet. */
    if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
        ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
        dev_kfree_skb_any(skb);

    /* The max framesize filter on this chip is set higher than
     * MTU since FCoE uses 2k frames.
     */
    if (skb->len > ndev->mtu + ETH_HLEN) {
        dev_kfree_skb_any(skb);
        rx_ring->rx_dropped++;

    /* loopback self test for ethtool */
    if (test_bit(QL_SELFTEST, &qdev->flags)) {
        ql_check_lb_frame(qdev, skb);
        dev_kfree_skb_any(skb);

    prefetch(skb->data);
    if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
                (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
                IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
                (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
                IB_MAC_IOCB_RSP_M_REG ? "Registered" :
                (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
                IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
        rx_ring->rx_multicast++;
    if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                "Promiscuous Packet.\n");

    skb->protocol = eth_type_trans(skb, ndev);
    skb_checksum_none_assert(skb);

    /* If rx checksum is on, and there are no
     * csum or frame errors.
     */
    if ((ndev->features & NETIF_F_RXCSUM) &&
        !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                    "TCP checksum done!\n");
            skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
                (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
            /* Unfragmented ipv4 UDP frame. */
            struct iphdr *iph = (struct iphdr *) skb->data;

            if (!(iph->frag_off &
                    htons(IP_MF|IP_OFFSET))) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                        "UDP checksum done!\n");

    rx_ring->rx_packets++;
    rx_ring->rx_bytes += skb->len;
    skb_record_rx_queue(skb, rx_ring->cq_id);
    if (vlan_id != 0xffff)
        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
    if (skb->ip_summed == CHECKSUM_UNNECESSARY)
        napi_gro_receive(&rx_ring->napi, skb);
        netif_receive_skb(skb);
/* Process an inbound completion from an rx ring. */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
        struct rx_ring *rx_ring,
        struct ib_mac_iocb_rsp *ib_mac_rsp)

    u32 length = le32_to_cpu(ib_mac_rsp->data_len);
    u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
            (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
            ((le16_to_cpu(ib_mac_rsp->vlan_id) &
            IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;

    QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

    if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
        /* The data and headers are split into
         * separate buffers.
         */
        ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
    } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
        /* The data fit in a single small buffer.
         * Allocate a new skb, copy the data and
         * return the buffer to the free pool.
         */
        ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
    } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
            !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
            (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
        /* TCP packet in a page chunk that's been checksummed.
         * Tack it on to our GRO skb and let it go.
         */
        ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
    } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
        /* Non-TCP packet in a page chunk.  Allocate an
         * skb, tack it on frags, and send it up.
         */
        ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
        /* Non-TCP/UDP large frames that span multiple buffers
         * can be processed correctly by the split frame logic.
         */
        ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,

    return (unsigned long)length;
/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
        struct ob_mac_iocb_rsp *mac_rsp)

    struct tx_ring *tx_ring;
    struct tx_ring_desc *tx_ring_desc;

    QL_DUMP_OB_MAC_RSP(mac_rsp);
    tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
    tx_ring_desc = &tx_ring->q[mac_rsp->tid];
    ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
    tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
    tx_ring->tx_packets++;
    dev_kfree_skb(tx_ring_desc->skb);
    tx_ring_desc->skb = NULL;

    if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
            OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
        if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
            netif_warn(qdev, tx_done, qdev->ndev,
                    "Total descriptor length did not match transfer length.\n");
        if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
            netif_warn(qdev, tx_done, qdev->ndev,
                    "Frame too short to be valid, not sent.\n");
        if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
            netif_warn(qdev, tx_done, qdev->ndev,
                    "Frame too long, but sent anyway.\n");
        if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
            netif_warn(qdev, tx_done, qdev->ndev,
                    "PCI backplane error. Frame not sent.\n");
    atomic_inc(&tx_ring->tx_count);
/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)

    queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);

void ql_queue_asic_error(struct ql_adapter *qdev)

    ql_disable_interrupts(qdev);
    /* Clear adapter up bit to signal the recovery
     * process that it shouldn't kill the reset worker
     * thread.
     */
    clear_bit(QL_ADAPTER_UP, &qdev->flags);
    /* Set asic recovery bit to indicate reset process that we are
     * in fatal error recovery process rather than normal close.
     */
    set_bit(QL_ASIC_RECOVERY, &qdev->flags);
    queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
        struct ib_ae_iocb_rsp *ib_ae_rsp)

    switch (ib_ae_rsp->event) {
    case MGMT_ERR_EVENT:
        netif_err(qdev, rx_err, qdev->ndev,
                "Management Processor Fatal Error.\n");
        ql_queue_fw_error(qdev);

    case CAM_LOOKUP_ERR_EVENT:
        netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
        netdev_err(qdev->ndev, "This event shouldn't occur.\n");
        ql_queue_asic_error(qdev);

    case SOFT_ECC_ERROR_EVENT:
        netdev_err(qdev->ndev, "Soft ECC error detected.\n");
        ql_queue_asic_error(qdev);

    case PCI_ERR_ANON_BUF_RD:
        netdev_err(qdev->ndev,
                "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
        ql_queue_asic_error(qdev);

        netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
        ql_queue_asic_error(qdev);
static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)

    struct ql_adapter *qdev = rx_ring->qdev;
    u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
    struct ob_mac_iocb_rsp *net_rsp = NULL;
    struct tx_ring *tx_ring;

    /* While there are entries in the completion queue. */
    while (prod != rx_ring->cnsmr_idx) {

        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                "cq_id = %d, prod = %d, cnsmr = %d\n",
                rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

        net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;

        switch (net_rsp->opcode) {
        case OPCODE_OB_MAC_TSO_IOCB:
        case OPCODE_OB_MAC_IOCB:
            ql_process_mac_tx_intr(qdev, net_rsp);
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                    "Hit default case, not handled! dropping the packet, opcode = %x.\n",
        ql_update_cq(rx_ring);
        prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);

    ql_write_cq_idx(rx_ring);
    tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
    if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
        if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
            /*
             * The queue got stopped because the tx_ring was full.
             * Wake it up, because it's now at least 25% empty.
             */
            netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
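/*
 * Illustrative arithmetic, not part of the source: the wake threshold
 * above is a quarter of the ring.  With, say, a 256-entry tx ring
 * (wq_len == 256), the stopped subqueue is woken once more than
 * 256 / 4 = 64 descriptors are free again, which keeps the queue from
 * bouncing between stopped and woken on every single completion.
 */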
static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)

    struct ql_adapter *qdev = rx_ring->qdev;
    u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
    struct ql_net_rsp_iocb *net_rsp;

    /* While there are entries in the completion queue. */
    while (prod != rx_ring->cnsmr_idx) {

        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                "cq_id = %d, prod = %d, cnsmr = %d\n",
                rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

        net_rsp = rx_ring->curr_entry;

        switch (net_rsp->opcode) {
        case OPCODE_IB_MAC_IOCB:
            ql_process_mac_rx_intr(qdev, rx_ring,
                    (struct ib_mac_iocb_rsp *)

        case OPCODE_IB_AE_IOCB:
            ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                    "Hit default case, not handled! dropping the packet, opcode = %x.\n",
        ql_update_cq(rx_ring);
        prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
        if (count == budget)

    ql_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
    ql_write_cq_idx(rx_ring);
2213 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2215 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2216 struct ql_adapter *qdev = rx_ring->qdev;
2217 struct rx_ring *trx_ring;
2218 int i, work_done = 0;
2219 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2221 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2222 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2224 /* Service the TX rings first. They start
2225 * right after the RSS rings. */
2226 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2227 trx_ring = &qdev->rx_ring[i];
2228 /* If this TX completion ring belongs to this vector and
2229 * it's not empty then service it.
2231 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2232 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2233 trx_ring->cnsmr_idx)) {
2234 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2235 "%s: Servicing TX completion ring %d.\n",
2236 __func__, trx_ring->cq_id);
2237 ql_clean_outbound_rx_ring(trx_ring);
2242 * Now service the RSS ring if it's active.
2244 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2245 rx_ring->cnsmr_idx) {
2246 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2247 "%s: Servicing RX completion ring %d.\n",
2248 __func__, rx_ring->cq_id);
2249 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2250 }
2252 if (work_done < budget) {
2253 napi_complete_done(napi, work_done);
2254 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2255 }
2256 return work_done;
2257 }
2259 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2261 struct ql_adapter *qdev = netdev_priv(ndev);
2263 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2264 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2265 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2266 } else {
2267 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2268 }
2269 }
2271 /*
2272 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2273 * based on the features to enable/disable hardware vlan accel
2274 */
2275 static int qlge_update_hw_vlan_features(struct net_device *ndev,
2276 netdev_features_t features)
2278 struct ql_adapter *qdev = netdev_priv(ndev);
2279 int status = 0;
2280 bool need_restart = netif_running(ndev);
2282 if (need_restart) {
2283 status = ql_adapter_down(qdev);
2284 if (status) {
2285 netif_err(qdev, link, qdev->ndev,
2286 "Failed to bring down the adapter\n");
2287 return status;
2288 }
2289 }
2291 /* update the features with the recent change */
2292 ndev->features = features;
2294 if (need_restart) {
2295 status = ql_adapter_up(qdev);
2296 if (status) {
2297 netif_err(qdev, link, qdev->ndev,
2298 "Failed to bring up the adapter\n");
2299 return status;
2300 }
2301 }
2303 return status;
2304 }
2306 static int qlge_set_features(struct net_device *ndev,
2307 netdev_features_t features)
2309 netdev_features_t changed = ndev->features ^ features;
2310 int err;
2312 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2313 /* Update the behavior of vlan accel in the adapter */
2314 err = qlge_update_hw_vlan_features(ndev, features);
2318 qlge_vlan_mode(ndev, features);
2324 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2326 u32 enable_bit = MAC_ADDR_E;
2327 int err;
2329 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2330 MAC_ADDR_TYPE_VLAN, vid);
2331 if (err)
2332 netif_err(qdev, ifup, qdev->ndev,
2333 "Failed to init vlan address.\n");
2334 return err;
2335 }
2337 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2339 struct ql_adapter *qdev = netdev_priv(ndev);
2343 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2344 if (status)
2345 return status;
2347 err = __qlge_vlan_rx_add_vid(qdev, vid);
2348 set_bit(vid, qdev->active_vlans);
2350 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2352 return err;
2353 }
2355 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2357 u32 enable_bit = 0;
2358 int err;
2360 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2361 MAC_ADDR_TYPE_VLAN, vid);
2362 if (err)
2363 netif_err(qdev, ifup, qdev->ndev,
2364 "Failed to clear vlan address.\n");
2365 return err;
2366 }
2368 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2370 struct ql_adapter *qdev = netdev_priv(ndev);
2374 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2375 if (status)
2376 return status;
2378 err = __qlge_vlan_rx_kill_vid(qdev, vid);
2379 clear_bit(vid, qdev->active_vlans);
2381 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2383 return err;
2384 }
2386 static void qlge_restore_vlan(struct ql_adapter *qdev)
2387 {
2388 int status;
2389 u16 vid;
2391 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2392 if (status)
2393 return;
2395 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2396 __qlge_vlan_rx_add_vid(qdev, vid);
2398 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2401 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2402 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2404 struct rx_ring *rx_ring = dev_id;
2405 napi_schedule(&rx_ring->napi);
2406 return IRQ_HANDLED;
2407 }
2409 /* This handles a fatal error, MPI activity, and the default
2410 * rx_ring in an MSI-X multiple vector environment.
2411 * In MSI/Legacy environment it also processes the rest of
2412 * the rx_rings.
2413 */
2414 static irqreturn_t qlge_isr(int irq, void *dev_id)
2416 struct rx_ring *rx_ring = dev_id;
2417 struct ql_adapter *qdev = rx_ring->qdev;
2418 struct intr_context *intr_context = &qdev->intr_context[0];
2419 u32 var;
2420 int work_done = 0;
2422 /* Experience shows that when using INTx interrupts, interrupts must
2423 * be masked manually.
2424 * When using MSI mode, INTR_EN_EN must be explicitly disabled
2425 * (even though it is auto-masked), otherwise a later command to
2426 * enable it is not effective.
2427 */
2428 if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
2429 ql_disable_completion_interrupt(qdev, 0);
2431 var = ql_read32(qdev, STS);
2433 /*
2434 * Check for fatal error.
2435 */
2436 if (var & STS_FE) {
2437 ql_disable_completion_interrupt(qdev, 0);
2438 ql_queue_asic_error(qdev);
2439 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2440 var = ql_read32(qdev, ERR_STS);
2441 netdev_err(qdev->ndev, "Resetting chip. "
2442 "Error Status Register = 0x%x\n", var);
2447 * Check MPI processor activity.
2449 if ((var & STS_PI) &&
2450 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2451 /*
2452 * We've got an async event or mailbox completion.
2453 * Handle it and clear the source of the interrupt.
2454 */
2455 netif_err(qdev, intr, qdev->ndev,
2456 "Got MPI processor interrupt.\n");
2457 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2458 queue_delayed_work_on(smp_processor_id(),
2459 qdev->workqueue, &qdev->mpi_work, 0);
2460 work_done++;
2461 }
2463 /*
2464 * Get the bit-mask that shows the active queues for this
2465 * pass. Compare it to the queues that this irq services
2466 * and call napi if there's a match.
2467 */
2468 var = ql_read32(qdev, ISR1);
2469 if (var & intr_context->irq_mask) {
2470 netif_info(qdev, intr, qdev->ndev,
2471 "Waking handler for rx_ring[0].\n");
2472 napi_schedule(&rx_ring->napi);
2473 work_done++;
2474 }
2475 /* Experience shows that the device sometimes signals an
2476 * interrupt but no work is scheduled from this function.
2477 * Nevertheless, the interrupt is auto-masked. Therefore, we
2478 * systematically re-enable the interrupt if we didn't
2479 * schedule napi.
2480 */
2481 ql_enable_completion_interrupt(qdev, 0);
2484 return work_done ? IRQ_HANDLED : IRQ_NONE;
2485 }
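/* ql_tso() below primes the hardware for segmentation: the stack leaves
 * a pseudo-header checksum that includes the payload length, which the
 * chip must not see, so the TCP checksum field is re-seeded with a
 * zero-length pseudo-header sum, e.g. for IPv4:
 *   tcp_hdr(skb)->check =
 *       ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0);
 */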
2487 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2488 {
2489 int err;
2490 if (skb_is_gso(skb)) {
2492 __be16 l3_proto = vlan_get_protocol(skb);
2494 err = skb_cow_head(skb, 0);
2495 if (err < 0)
2496 return err;
2498 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2499 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2500 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2501 mac_iocb_ptr->total_hdrs_len =
2502 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2503 mac_iocb_ptr->net_trans_offset =
2504 cpu_to_le16(skb_network_offset(skb) |
2505 skb_transport_offset(skb)
2506 << OB_MAC_TRANSPORT_HDR_SHIFT);
2507 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2508 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2509 if (likely(l3_proto == htons(ETH_P_IP))) {
2510 struct iphdr *iph = ip_hdr(skb);
2512 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2513 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2514 iph->daddr, 0,
2515 IPPROTO_TCP,
2516 0);
2517 } else if (l3_proto == htons(ETH_P_IPV6)) {
2518 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2519 tcp_hdr(skb)->check =
2520 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2521 &ipv6_hdr(skb)->daddr,
2522 0, IPPROTO_TCP, 0);
2523 }
2524 return 1;
2525 }
2526 return 0;
2527 }
2529 static void ql_hw_csum_setup(struct sk_buff *skb,
2530 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2532 int len;
2533 struct iphdr *iph = ip_hdr(skb);
2534 __sum16 *check;
2535 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2536 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2537 mac_iocb_ptr->net_trans_offset =
2538 cpu_to_le16(skb_network_offset(skb) |
2539 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2541 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2542 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2543 if (likely(iph->protocol == IPPROTO_TCP)) {
2544 check = &(tcp_hdr(skb)->check);
2545 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2546 mac_iocb_ptr->total_hdrs_len =
2547 cpu_to_le16(skb_transport_offset(skb) +
2548 (tcp_hdr(skb)->doff << 2));
2549 } else {
2550 check = &(udp_hdr(skb)->check);
2551 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2552 mac_iocb_ptr->total_hdrs_len =
2553 cpu_to_le16(skb_transport_offset(skb) +
2554 sizeof(struct udphdr));
2555 }
2556 *check = ~csum_tcpudp_magic(iph->saddr,
2557 iph->daddr, len, iph->protocol, 0);
2558 }
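/* Unlike the TSO seed, this CHECKSUM_PARTIAL seed does include the L4
 * length (len = tot_len - ihl * 4): the frame goes out as a single
 * segment, and the hardware only folds the payload sum into whichever
 * checksum field the TC/UC flag above selects.
 */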
2560 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2562 struct tx_ring_desc *tx_ring_desc;
2563 struct ob_mac_iocb_req *mac_iocb_ptr;
2564 struct ql_adapter *qdev = netdev_priv(ndev);
2565 int tso;
2566 struct tx_ring *tx_ring;
2567 u32 tx_ring_idx = (u32) skb->queue_mapping;
2569 tx_ring = &qdev->tx_ring[tx_ring_idx];
2571 if (skb_padto(skb, ETH_ZLEN))
2572 return NETDEV_TX_OK;
2574 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2575 netif_info(qdev, tx_queued, qdev->ndev,
2576 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2577 __func__, tx_ring_idx);
2578 netif_stop_subqueue(ndev, tx_ring->wq_id);
2579 tx_ring->tx_errors++;
2580 return NETDEV_TX_BUSY;
2582 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2583 mac_iocb_ptr = tx_ring_desc->queue_entry;
2584 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2586 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2587 mac_iocb_ptr->tid = tx_ring_desc->index;
2588 /* We use the upper 32-bits to store the tx queue for this IO.
2589 * When we get the completion we can use it to establish the context.
2590 */
2591 mac_iocb_ptr->txq_idx = tx_ring_idx;
2592 tx_ring_desc->skb = skb;
2594 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2596 if (skb_vlan_tag_present(skb)) {
2597 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2598 "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2599 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2600 mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2601 }
2602 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2603 if (tso < 0) {
2604 dev_kfree_skb_any(skb);
2605 return NETDEV_TX_OK;
2606 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2607 ql_hw_csum_setup(skb,
2608 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2609 }
2610 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2611 NETDEV_TX_OK) {
2612 netif_err(qdev, tx_queued, qdev->ndev,
2613 "Could not map the segments.\n");
2614 tx_ring->tx_errors++;
2615 return NETDEV_TX_BUSY;
2617 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2618 tx_ring->prod_idx++;
2619 if (tx_ring->prod_idx == tx_ring->wq_len)
2620 tx_ring->prod_idx = 0;
2623 ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2624 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2625 "tx queued, slot %d, len %d\n",
2626 tx_ring->prod_idx, skb->len);
2628 atomic_dec(&tx_ring->tx_count);
2630 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2631 netif_stop_subqueue(ndev, tx_ring->wq_id);
2632 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2633 /*
2634 * The queue got stopped because the tx_ring was full.
2635 * Wake it up, because it's now at least 25% empty.
2636 */
2637 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2638 }
2639 return NETDEV_TX_OK;
2640 }
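/* The stop-then-recheck pattern above avoids a lost wakeup: the subqueue
 * is stopped as soon as fewer than 2 free entries remain, then re-woken
 * immediately if the completion path refilled tx_count between the
 * atomic_read() and netif_stop_subqueue().
 */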
2643 static void ql_free_shadow_space(struct ql_adapter *qdev)
2645 if (qdev->rx_ring_shadow_reg_area) {
2646 pci_free_consistent(qdev->pdev,
2647 PAGE_SIZE,
2648 qdev->rx_ring_shadow_reg_area,
2649 qdev->rx_ring_shadow_reg_dma);
2650 qdev->rx_ring_shadow_reg_area = NULL;
2652 if (qdev->tx_ring_shadow_reg_area) {
2653 pci_free_consistent(qdev->pdev,
2654 PAGE_SIZE,
2655 qdev->tx_ring_shadow_reg_area,
2656 qdev->tx_ring_shadow_reg_dma);
2657 qdev->tx_ring_shadow_reg_area = NULL;
2661 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2663 qdev->rx_ring_shadow_reg_area =
2664 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2665 &qdev->rx_ring_shadow_reg_dma);
2666 if (!qdev->rx_ring_shadow_reg_area) {
2667 netif_err(qdev, ifup, qdev->ndev,
2668 "Allocation of RX shadow space failed.\n");
2672 qdev->tx_ring_shadow_reg_area =
2673 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2674 &qdev->tx_ring_shadow_reg_dma);
2675 if (!qdev->tx_ring_shadow_reg_area) {
2676 netif_err(qdev, ifup, qdev->ndev,
2677 "Allocation of TX shadow space failed.\n");
2678 goto err_wqp_sh_area;
2683 pci_free_consistent(qdev->pdev,
2685 qdev->rx_ring_shadow_reg_area,
2686 qdev->rx_ring_shadow_reg_dma);
2690 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2692 struct tx_ring_desc *tx_ring_desc;
2694 struct ob_mac_iocb_req *mac_iocb_ptr;
2696 mac_iocb_ptr = tx_ring->wq_base;
2697 tx_ring_desc = tx_ring->q;
2698 for (i = 0; i < tx_ring->wq_len; i++) {
2699 tx_ring_desc->index = i;
2700 tx_ring_desc->skb = NULL;
2701 tx_ring_desc->queue_entry = mac_iocb_ptr;
2702 mac_iocb_ptr++;
2703 tx_ring_desc++;
2704 }
2705 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2706 }
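/* tx_count is used as a free-slot counter: it starts at wq_len,
 * qlge_send() decrements it for every IOCB it posts, and the TX
 * completion path increments it again as descriptors are reclaimed.
 */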
2708 static void ql_free_tx_resources(struct ql_adapter *qdev,
2709 struct tx_ring *tx_ring)
2711 if (tx_ring->wq_base) {
2712 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2713 tx_ring->wq_base, tx_ring->wq_base_dma);
2714 tx_ring->wq_base = NULL;
2720 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2721 struct tx_ring *tx_ring)
2724 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2725 &tx_ring->wq_base_dma);
2727 if (!tx_ring->wq_base ||
2728 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2732 kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
2739 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2740 tx_ring->wq_base, tx_ring->wq_base_dma);
2741 tx_ring->wq_base = NULL;
2743 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2747 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2749 struct qlge_bq *lbq = &rx_ring->lbq;
2750 unsigned int last_offset;
2752 last_offset = ql_lbq_block_size(qdev) - qdev->lbq_buf_size;
2753 while (lbq->next_to_clean != lbq->next_to_use) {
2754 struct qlge_bq_desc *lbq_desc =
2755 &lbq->queue[lbq->next_to_clean];
2757 if (lbq_desc->p.pg_chunk.offset == last_offset)
2758 pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
2759 ql_lbq_block_size(qdev),
2760 PCI_DMA_FROMDEVICE);
2761 put_page(lbq_desc->p.pg_chunk.page);
2763 lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
2766 if (rx_ring->master_chunk.page) {
2767 pci_unmap_page(qdev->pdev, rx_ring->chunk_dma_addr,
2768 ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
2769 put_page(rx_ring->master_chunk.page);
2770 rx_ring->master_chunk.page = NULL;
2774 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2778 for (i = 0; i < QLGE_BQ_LEN; i++) {
2779 struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i];
2782 netif_err(qdev, ifup, qdev->ndev,
2783 "sbq_desc %d is NULL.\n", i);
2786 if (sbq_desc->p.skb) {
2787 pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
2788 SMALL_BUF_MAP_SIZE,
2789 PCI_DMA_FROMDEVICE);
2790 dev_kfree_skb(sbq_desc->p.skb);
2791 sbq_desc->p.skb = NULL;
2796 /* Free all large and small rx buffers associated
2797 * with the completion queues for this device.
2798 */
2799 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2803 for (i = 0; i < qdev->rx_ring_count; i++) {
2804 struct rx_ring *rx_ring = &qdev->rx_ring[i];
2806 if (rx_ring->lbq.queue)
2807 ql_free_lbq_buffers(qdev, rx_ring);
2808 if (rx_ring->sbq.queue)
2809 ql_free_sbq_buffers(qdev, rx_ring);
2813 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2817 for (i = 0; i < qdev->rss_ring_count; i++)
2818 ql_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL,
2819 HZ / 2);
2822 static int qlge_init_bq(struct qlge_bq *bq)
2824 struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
2825 struct ql_adapter *qdev = rx_ring->qdev;
2826 struct qlge_bq_desc *bq_desc;
2830 bq->base = pci_alloc_consistent(qdev->pdev, QLGE_BQ_SIZE,
2833 netif_err(qdev, ifup, qdev->ndev,
2834 "ring %u %s allocation failed.\n", rx_ring->cq_id,
2835 bq_type_name[bq->type]);
2836 return -ENOMEM;
2837 }
2839 bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
2845 bq_desc = &bq->queue[0];
2846 for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) {
2847 bq_desc->p.skb = NULL;
2849 bq_desc->buf_ptr = buf_ptr;
2855 static void ql_free_rx_resources(struct ql_adapter *qdev,
2856 struct rx_ring *rx_ring)
2858 /* Free the small buffer queue. */
2859 if (rx_ring->sbq.base) {
2860 pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
2861 rx_ring->sbq.base, rx_ring->sbq.base_dma);
2862 rx_ring->sbq.base = NULL;
2865 /* Free the small buffer queue control blocks. */
2866 kfree(rx_ring->sbq.queue);
2867 rx_ring->sbq.queue = NULL;
2869 /* Free the large buffer queue. */
2870 if (rx_ring->lbq.base) {
2871 pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
2872 rx_ring->lbq.base, rx_ring->lbq.base_dma);
2873 rx_ring->lbq.base = NULL;
2876 /* Free the large buffer queue control blocks. */
2877 kfree(rx_ring->lbq.queue);
2878 rx_ring->lbq.queue = NULL;
2880 /* Free the rx queue. */
2881 if (rx_ring->cq_base) {
2882 pci_free_consistent(qdev->pdev,
2883 rx_ring->cq_size,
2884 rx_ring->cq_base, rx_ring->cq_base_dma);
2885 rx_ring->cq_base = NULL;
2889 /* Allocate queues and buffers for this completion queue based
2890 * on the values in the parameter structure. */
2891 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2892 struct rx_ring *rx_ring)
2895 /*
2896 * Allocate the completion queue for this rx_ring.
2897 */
2898 rx_ring->cq_base =
2899 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2900 &rx_ring->cq_base_dma);
2902 if (!rx_ring->cq_base) {
2903 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2904 return -ENOMEM;
2905 }
2907 if (rx_ring->cq_id < qdev->rss_ring_count &&
2908 (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
2909 ql_free_rx_resources(qdev, rx_ring);
2916 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2918 struct tx_ring *tx_ring;
2919 struct tx_ring_desc *tx_ring_desc;
2923 * Loop through all queues and free
2926 for (j = 0; j < qdev->tx_ring_count; j++) {
2927 tx_ring = &qdev->tx_ring[j];
2928 for (i = 0; i < tx_ring->wq_len; i++) {
2929 tx_ring_desc = &tx_ring->q[i];
2930 if (tx_ring_desc && tx_ring_desc->skb) {
2931 netif_err(qdev, ifdown, qdev->ndev,
2932 "Freeing lost SKB %p, from queue %d, index %d.\n",
2933 tx_ring_desc->skb, j,
2934 tx_ring_desc->index);
2935 ql_unmap_send(qdev, tx_ring_desc,
2936 tx_ring_desc->map_cnt);
2937 dev_kfree_skb(tx_ring_desc->skb);
2938 tx_ring_desc->skb = NULL;
2944 static void ql_free_mem_resources(struct ql_adapter *qdev)
2948 for (i = 0; i < qdev->tx_ring_count; i++)
2949 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2950 for (i = 0; i < qdev->rx_ring_count; i++)
2951 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2952 ql_free_shadow_space(qdev);
2955 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2959 /* Allocate space for our shadow registers and such. */
2960 if (ql_alloc_shadow_space(qdev))
2961 return -ENOMEM;
2963 for (i = 0; i < qdev->rx_ring_count; i++) {
2964 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2965 netif_err(qdev, ifup, qdev->ndev,
2966 "RX resource allocation failed.\n");
2970 /* Allocate tx queue resources */
2971 for (i = 0; i < qdev->tx_ring_count; i++) {
2972 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2973 netif_err(qdev, ifup, qdev->ndev,
2974 "TX resource allocation failed.\n");
2981 ql_free_mem_resources(qdev);
2985 /* Set up the rx ring control block and pass it to the chip.
2986 * The control block is defined as
2987 * "Completion Queue Initialization Control Block", or cqicb.
2989 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2991 struct cqicb *cqicb = &rx_ring->cqicb;
2992 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2993 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2994 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2995 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2996 void __iomem *doorbell_area =
2997 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2998 int err = 0;
2999 u64 tmp;
3000 __le64 *base_indirect_ptr;
3001 int page_entries = 0;
3003 /* Set up the shadow registers for this ring. */
3004 rx_ring->prod_idx_sh_reg = shadow_reg;
3005 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3006 *rx_ring->prod_idx_sh_reg = 0;
3007 shadow_reg += sizeof(u64);
3008 shadow_reg_dma += sizeof(u64);
3009 rx_ring->lbq.base_indirect = shadow_reg;
3010 rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
3011 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3012 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3013 rx_ring->sbq.base_indirect = shadow_reg;
3014 rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
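/* Summary of the shadow area carved out above: each ring gets
 * RX_RING_SHADOW_SPACE bytes holding one u64 producer-index shadow that
 * the chip DMA-writes, followed by the lbq and sbq indirect page lists
 * of MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN) u64 entries each, which the chip
 * reads to locate the buffer-queue pages.
 */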
3016 /* PCI doorbell mem area + 0x00 for consumer index register */
3017 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3018 rx_ring->cnsmr_idx = 0;
3019 rx_ring->curr_entry = rx_ring->cq_base;
3021 /* PCI doorbell mem area + 0x04 for valid register */
3022 rx_ring->valid_db_reg = doorbell_area + 0x04;
3024 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3025 rx_ring->lbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x18);
3027 /* PCI doorbell mem area + 0x1c */
3028 rx_ring->sbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x1c);
3030 memset((void *)cqicb, 0, sizeof(struct cqicb));
3031 cqicb->msix_vect = rx_ring->irq;
3033 cqicb->len = cpu_to_le16(QLGE_FIT16(rx_ring->cq_len) | LEN_V |
3034 LEN_CPP_CONT);
3036 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3038 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3040 /*
3041 * Set up the control block load flags.
3042 */
3043 cqicb->flags = FLAGS_LC | /* Load queue base address */
3044 FLAGS_LV | /* Load MSI-X vector */
3045 FLAGS_LI; /* Load irq delay values */
3046 if (rx_ring->cq_id < qdev->rss_ring_count) {
3047 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3048 tmp = (u64)rx_ring->lbq.base_dma;
3049 base_indirect_ptr = rx_ring->lbq.base_indirect;
3050 page_entries = 0;
3051 do {
3052 *base_indirect_ptr = cpu_to_le64(tmp);
3053 tmp += DB_PAGE_SIZE;
3054 base_indirect_ptr++;
3055 page_entries++;
3056 } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3057 cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
3058 cqicb->lbq_buf_size =
3059 cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
3060 cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3061 rx_ring->lbq.next_to_use = 0;
3062 rx_ring->lbq.next_to_clean = 0;
3064 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3065 tmp = (u64)rx_ring->sbq.base_dma;
3066 base_indirect_ptr = rx_ring->sbq.base_indirect;
3067 page_entries = 0;
3068 do {
3069 *base_indirect_ptr = cpu_to_le64(tmp);
3070 tmp += DB_PAGE_SIZE;
3071 base_indirect_ptr++;
3072 page_entries++;
3073 } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3074 cqicb->sbq_addr =
3075 cpu_to_le64(rx_ring->sbq.base_indirect_dma);
3076 cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
3077 cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3078 rx_ring->sbq.next_to_use = 0;
3079 rx_ring->sbq.next_to_clean = 0;
3081 if (rx_ring->cq_id < qdev->rss_ring_count) {
3082 /* Inbound completion handling rx_rings run in
3083 * separate NAPI contexts.
3084 */
3085 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3086 64);
3087 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3088 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3089 } else {
3090 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3091 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3093 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3094 CFG_LCQ, rx_ring->cq_id);
3096 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3102 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3104 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3105 void __iomem *doorbell_area =
3106 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3107 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3108 (tx_ring->wq_id * sizeof(u64));
3109 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3110 (tx_ring->wq_id * sizeof(u64));
3113 /*
3114 * Assign doorbell registers for this tx_ring.
3115 */
3116 /* TX PCI doorbell mem area for tx producer index */
3117 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3118 tx_ring->prod_idx = 0;
3119 /* TX PCI doorbell mem area + 0x04 */
3120 tx_ring->valid_db_reg = doorbell_area + 0x04;
3122 /*
3123 * Assign shadow registers for this tx_ring.
3124 */
3125 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3126 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3128 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3129 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3130 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3131 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3133 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3135 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3137 ql_init_tx_ring(qdev, tx_ring);
3139 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3140 (u16) tx_ring->wq_id);
3142 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3148 static void ql_disable_msix(struct ql_adapter *qdev)
3150 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3151 pci_disable_msix(qdev->pdev);
3152 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3153 kfree(qdev->msi_x_entry);
3154 qdev->msi_x_entry = NULL;
3155 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3156 pci_disable_msi(qdev->pdev);
3157 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3161 /* We start by trying to get the number of vectors
3162 * stored in qdev->intr_count. If we don't get that
3163 * many then we reduce the count and try again.
3164 */
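/* e.g. if intr_count starts at, say, 8 but the platform can only grant
 * 3 vectors, pci_enable_msix_range(..., 1, 8) returns 3 and intr_count
 * is trimmed to 3; only when not even one vector is available (a
 * negative return) do we fall back to MSI or legacy interrupts.
 */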
3165 static void ql_enable_msix(struct ql_adapter *qdev)
3169 /* Get the MSIX vectors. */
3170 if (qlge_irq_type == MSIX_IRQ) {
3171 /* Try to alloc space for the msix struct,
3172 * if it fails then go to MSI/legacy.
3173 */
3174 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3175 sizeof(struct msix_entry),
3176 GFP_KERNEL);
3177 if (!qdev->msi_x_entry) {
3178 qlge_irq_type = MSI_IRQ;
3182 for (i = 0; i < qdev->intr_count; i++)
3183 qdev->msi_x_entry[i].entry = i;
3185 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3186 1, qdev->intr_count);
3187 if (err < 0) {
3188 kfree(qdev->msi_x_entry);
3189 qdev->msi_x_entry = NULL;
3190 netif_warn(qdev, ifup, qdev->ndev,
3191 "MSI-X Enable failed, trying MSI.\n");
3192 qlge_irq_type = MSI_IRQ;
3193 } else {
3194 qdev->intr_count = err;
3195 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3196 netif_info(qdev, ifup, qdev->ndev,
3197 "MSI-X Enabled, got %d vectors.\n",
3203 qdev->intr_count = 1;
3204 if (qlge_irq_type == MSI_IRQ) {
3205 if (!pci_enable_msi(qdev->pdev)) {
3206 set_bit(QL_MSI_ENABLED, &qdev->flags);
3207 netif_info(qdev, ifup, qdev->ndev,
3208 "Running with MSI interrupts.\n");
3212 qlge_irq_type = LEG_IRQ;
3213 set_bit(QL_LEGACY_ENABLED, &qdev->flags);
3214 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3215 "Running with legacy interrupts.\n");
3218 /* Each vector services 1 RSS ring and 1 or more
3219 * TX completion rings. This function loops through
3220 * the TX completion rings and assigns the vector that
3221 * will service it. An example would be if there are
3222 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3223 * This would mean that vector 0 would service RSS ring 0
3224 * and TX completion rings 0,1,2 and 3. Vector 1 would
3225 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3226 */
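/* Equivalently, with the counts from that example:
 * tx_rings_per_vector = 8 / 2 = 4, so TX completion ring i (counting
 * from rss_ring_count) is serviced by vector
 * (i - rss_ring_count) / tx_rings_per_vector.
 */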
3227 static void ql_set_tx_vect(struct ql_adapter *qdev)
3230 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3232 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3233 /* Assign irq vectors to TX rx_rings.*/
3234 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3235 i < qdev->rx_ring_count; i++) {
3236 if (j == tx_rings_per_vector) {
3237 vect++;
3238 j = 0;
3239 }
3240 qdev->rx_ring[i].irq = vect;
3241 j++;
3242 }
3243 } else {
3244 /* For single vector all rings have an irq
3245 * value of 0.
3246 */
3247 for (i = 0; i < qdev->rx_ring_count; i++)
3248 qdev->rx_ring[i].irq = 0;
3252 /* Set the interrupt mask for this vector. Each vector
3253 * will service 1 RSS ring and 1 or more TX completion
3254 * rings. This function sets up a bit mask per vector
3255 * that indicates which rings it services.
3256 */
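/* e.g. with 2 vectors, 2 RSS rings (cq_ids 0-1) and 8 TX completion
 * rings (cq_ids 2-9), vector 1 services cq 1 plus cqs 6-9, so its
 * irq_mask = (1 << 1) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9).
 */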
3257 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3259 int j, vect = ctx->intr;
3260 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3262 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3263 /* Add the RSS ring serviced by this vector
3264 * to the mask.
3265 */
3266 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3267 /* Add the TX ring(s) serviced by this vector
3268 * to the mask. */
3269 for (j = 0; j < tx_rings_per_vector; j++) {
3270 ctx->irq_mask |=
3271 (1 << qdev->rx_ring[qdev->rss_ring_count +
3272 (vect * tx_rings_per_vector) + j].cq_id);
3273 }
3274 } else {
3275 /* For single vector we just shift each queue's
3276 * enable bit into the one irq mask.
3277 */
3278 for (j = 0; j < qdev->rx_ring_count; j++)
3279 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3283 /*
3284 * Here we build the intr_context structures based on
3285 * our rx_ring count and intr vector count.
3286 * The intr_context structure is used to hook each vector
3287 * to possibly different handlers.
3288 */
3289 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3292 struct intr_context *intr_context = &qdev->intr_context[0];
3294 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3295 /* Each rx_ring has its
3296 * own intr_context since we have separate
3297 * vectors for each queue.
3298 */
3299 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3300 qdev->rx_ring[i].irq = i;
3301 intr_context->intr = i;
3302 intr_context->qdev = qdev;
3303 /* Set up this vector's bit-mask that indicates
3304 * which queues it services.
3305 */
3306 ql_set_irq_mask(qdev, intr_context);
3307 /*
3308 * We set up each vector's enable/disable/read bits so
3309 * there are no bit/mask calculations in the critical path.
3310 */
3311 intr_context->intr_en_mask =
3312 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3313 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3314 | i;
3315 intr_context->intr_dis_mask =
3316 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3317 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3318 INTR_EN_IHD | i;
3319 intr_context->intr_read_mask =
3320 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3321 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3322 i;
3323 if (i == 0) {
3324 /* The first vector/queue handles
3325 * broadcast/multicast, fatal errors,
3326 * and firmware events. This in addition
3327 * to normal inbound NAPI processing.
3328 */
3329 intr_context->handler = qlge_isr;
3330 sprintf(intr_context->name, "%s-rx-%d",
3331 qdev->ndev->name, i);
3332 } else {
3333 /*
3334 * Inbound queues handle unicast frames only.
3335 */
3336 intr_context->handler = qlge_msix_rx_isr;
3337 sprintf(intr_context->name, "%s-rx-%d",
3338 qdev->ndev->name, i);
3339 }
3340 }
3341 } else {
3342 /*
3343 * All rx_rings use the same intr_context since
3344 * there is only one vector.
3345 */
3346 intr_context->intr = 0;
3347 intr_context->qdev = qdev;
3348 /*
3349 * We set up each vector's enable/disable/read bits so
3350 * there are no bit/mask calculations in the critical path.
3351 */
3352 intr_context->intr_en_mask =
3353 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3354 intr_context->intr_dis_mask =
3355 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3356 INTR_EN_TYPE_DISABLE;
3357 if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) {
3358 /* Experience shows that when using INTx interrupts,
3359 * the device does not always auto-mask INTR_EN_EN.
3360 * Moreover, masking INTR_EN_EN manually does not
3361 * immediately prevent interrupt generation.
3362 */
3363 intr_context->intr_en_mask |= INTR_EN_EI << 16 |
3364 INTR_EN_EI;
3365 intr_context->intr_dis_mask |= INTR_EN_EI << 16;
3366 }
3367 intr_context->intr_read_mask =
3368 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3369 /*
3370 * Single interrupt means one handler for all rings.
3371 */
3372 intr_context->handler = qlge_isr;
3373 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3374 /* Set up this vector's bit-mask that indicates
3375 * which queues it services. In this case there is
3376 * a single vector so it will service all RSS and
3377 * TX completion rings.
3378 */
3379 ql_set_irq_mask(qdev, intr_context);
3381 /* Tell the TX completion rings which MSI-X vector
3382 * they will be using.
3383 */
3384 ql_set_tx_vect(qdev);
3387 static void ql_free_irq(struct ql_adapter *qdev)
3390 struct intr_context *intr_context = &qdev->intr_context[0];
3392 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3393 if (intr_context->hooked) {
3394 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3395 free_irq(qdev->msi_x_entry[i].vector,
3398 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3402 ql_disable_msix(qdev);
3405 static int ql_request_irq(struct ql_adapter *qdev)
3409 struct pci_dev *pdev = qdev->pdev;
3410 struct intr_context *intr_context = &qdev->intr_context[0];
3412 ql_resolve_queues_to_irqs(qdev);
3414 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3415 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3416 status = request_irq(qdev->msi_x_entry[i].vector,
3417 intr_context->handler,
3422 netif_err(qdev, ifup, qdev->ndev,
3423 "Failed request for MSIX interrupt %d.\n",
3428 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3429 "trying msi or legacy interrupts.\n");
3430 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3431 "%s: irq = %d.\n", __func__, pdev->irq);
3432 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3433 "%s: context->name = %s.\n", __func__,
3434 intr_context->name);
3435 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3436 "%s: dev_id = 0x%p.\n", __func__,
3439 request_irq(pdev->irq, qlge_isr,
3440 test_bit(QL_MSI_ENABLED,
3442 flags) ? 0 : IRQF_SHARED,
3443 intr_context->name, &qdev->rx_ring[0]);
3447 netif_err(qdev, ifup, qdev->ndev,
3448 "Hooked intr 0, queue type RX_Q, with name %s.\n",
3449 intr_context->name);
3451 intr_context->hooked = 1;
3455 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3460 static int ql_start_rss(struct ql_adapter *qdev)
3462 static const u8 init_hash_seed[] = {
3463 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3464 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3465 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3466 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3467 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3469 struct ricb *ricb = &qdev->ricb;
3472 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3474 memset((void *)ricb, 0, sizeof(*ricb));
3476 ricb->base_cq = RSS_L4K;
3477 ricb->flags =
3478 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3479 ricb->mask = cpu_to_le16((u16)(0x3ff));
3481 /*
3482 * Fill out the Indirection Table.
3483 */
3484 for (i = 0; i < 1024; i++)
3485 hash_id[i] = (i & (qdev->rss_ring_count - 1));
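/* The table has 1024 entries (hence the 0x3ff mask above): the low 10
 * bits of the RSS hash index it to pick a completion queue. With 4 RSS
 * rings, for example, the entries cycle 0, 1, 2, 3, 0, 1, ... so flows
 * spread evenly across the rings.
 */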
3487 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3488 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3490 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3492 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3498 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3502 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3505 /* Clear all the entries in the routing table. */
3506 for (i = 0; i < 16; i++) {
3507 status = ql_set_routing_reg(qdev, i, 0, 0);
3509 netif_err(qdev, ifup, qdev->ndev,
3510 "Failed to init routing register for CAM packets.\n");
3514 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3518 /* Initialize the frame-to-queue routing. */
3519 static int ql_route_initialize(struct ql_adapter *qdev)
3523 /* Clear all the entries in the routing table. */
3524 status = ql_clear_routing_entries(qdev);
3528 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3532 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3533 RT_IDX_IP_CSUM_ERR, 1);
3535 netif_err(qdev, ifup, qdev->ndev,
3536 "Failed to init routing register "
3537 "for IP CSUM error packets.\n");
3540 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3541 RT_IDX_TU_CSUM_ERR, 1);
3543 netif_err(qdev, ifup, qdev->ndev,
3544 "Failed to init routing register "
3545 "for TCP/UDP CSUM error packets.\n");
3548 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3550 netif_err(qdev, ifup, qdev->ndev,
3551 "Failed to init routing register for broadcast packets.\n");
3554 /* If we have more than one inbound queue, then turn on RSS in the
3555 * routing block.
3556 */
3557 if (qdev->rss_ring_count > 1) {
3558 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3559 RT_IDX_RSS_MATCH, 1);
3561 netif_err(qdev, ifup, qdev->ndev,
3562 "Failed to init routing register for MATCH RSS packets.\n");
3567 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3568 RT_IDX_CAM_HIT, 1);
3569 if (status)
3570 netif_err(qdev, ifup, qdev->ndev,
3571 "Failed to init routing register for CAM packets.\n");
3572 exit:
3573 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3574 return status;
3575 }
3577 int ql_cam_route_initialize(struct ql_adapter *qdev)
3578 {
3579 int status, set;
3581 /* Check if the link is up and use that to
3582 * determine if we are setting or clearing
3583 * the MAC address in the CAM.
3584 */
3585 set = ql_read32(qdev, STS);
3586 set &= qdev->port_link_up;
3587 status = ql_set_mac_addr(qdev, set);
3588 if (status) {
3589 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3590 return status;
3591 }
3593 status = ql_route_initialize(qdev);
3594 if (status)
3595 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3597 return status;
3598 }
3600 static int ql_adapter_initialize(struct ql_adapter *qdev)
3607 * Set up the System register to halt on errors.
3609 value = SYS_EFE | SYS_FAE;
3610 mask = value << 16;
3611 ql_write32(qdev, SYS, mask | value);
3613 /* Set the default queue, and VLAN behavior. */
3614 value = NIC_RCV_CFG_DFQ;
3615 mask = NIC_RCV_CFG_DFQ_MASK;
3616 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3617 value |= NIC_RCV_CFG_RV;
3618 mask |= (NIC_RCV_CFG_RV << 16);
3620 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3622 /* Set the MPI interrupt to enabled. */
3623 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3625 /* Enable the function, set pagesize, enable error checking. */
3626 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3627 FSC_EC | FSC_VM_PAGE_4K;
3628 value |= SPLT_SETTING;
3630 /* Set/clear header splitting. */
3631 mask = FSC_VM_PAGESIZE_MASK |
3632 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3633 ql_write32(qdev, FSC, mask | value);
3635 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3637 /* Set RX packet routing to use port/pci function on which the
3638 * packet arrived, in addition to the usual frame routing.
3639 * This is helpful on bonding where both interfaces can have
3640 * the same MAC address.
3641 */
3642 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3643 /* Reroute all packets to our Interface.
3644 * They may have been routed to MPI firmware
3645 * due to WOL.
3646 */
3647 value = ql_read32(qdev, MGMT_RCV_CFG);
3648 value &= ~MGMT_RCV_CFG_RM;
3651 /* Sticky reg needs clearing due to WOL. */
3652 ql_write32(qdev, MGMT_RCV_CFG, mask);
3653 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3655 /* Default WOL is enabled on Mezz cards */
3656 if (qdev->pdev->subsystem_device == 0x0068 ||
3657 qdev->pdev->subsystem_device == 0x0180)
3658 qdev->wol = WAKE_MAGIC;
3660 /* Start up the rx queues. */
3661 for (i = 0; i < qdev->rx_ring_count; i++) {
3662 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3664 netif_err(qdev, ifup, qdev->ndev,
3665 "Failed to start rx ring[%d].\n", i);
3670 /* If there is more than one inbound completion queue
3671 * then download a RICB to configure RSS.
3672 */
3673 if (qdev->rss_ring_count > 1) {
3674 status = ql_start_rss(qdev);
3676 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3677 return status;
3678 }
3679 }
3681 /* Start up the tx queues. */
3682 for (i = 0; i < qdev->tx_ring_count; i++) {
3683 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3685 netif_err(qdev, ifup, qdev->ndev,
3686 "Failed to start tx ring[%d].\n", i);
3691 /* Initialize the port and set the max framesize. */
3692 status = qdev->nic_ops->port_initialize(qdev);
3693 if (status)
3694 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3696 /* Set up the MAC address and frame routing filter. */
3697 status = ql_cam_route_initialize(qdev);
3698 if (status) {
3699 netif_err(qdev, ifup, qdev->ndev,
3700 "Failed to init CAM/Routing tables.\n");
3704 /* Start NAPI for the RSS queues. */
3705 for (i = 0; i < qdev->rss_ring_count; i++)
3706 napi_enable(&qdev->rx_ring[i].napi);
3711 /* Issue soft reset to chip. */
3712 static int ql_adapter_reset(struct ql_adapter *qdev)
3716 unsigned long end_jiffies;
3718 /* Clear all the entries in the routing table. */
3719 status = ql_clear_routing_entries(qdev);
3720 if (status) {
3721 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3722 return status;
3723 }
3725 /* Check if bit is set then skip the mailbox command and
3726 * clear the bit, else we are in normal reset process.
3727 */
3728 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3729 /* Stop management traffic. */
3730 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3732 /* Wait for the NIC and MGMNT FIFOs to empty. */
3733 ql_wait_fifo_empty(qdev);
3734 } else
3735 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3737 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3739 end_jiffies = jiffies + usecs_to_jiffies(30);
3740 do {
3741 value = ql_read32(qdev, RST_FO);
3742 if ((value & RST_FO_FR) == 0)
3743 break;
3744 cpu_relax();
3745 } while (time_before(jiffies, end_jiffies));
3747 if (value & RST_FO_FR) {
3748 netif_err(qdev, ifdown, qdev->ndev,
3749 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3750 status = -ETIMEDOUT;
3753 /* Resume management traffic. */
3754 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3758 static void ql_display_dev_info(struct net_device *ndev)
3760 struct ql_adapter *qdev = netdev_priv(ndev);
3762 netif_info(qdev, probe, qdev->ndev,
3763 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3764 "XG Roll = %d, XG Rev = %d.\n",
3765 qdev->func,
3766 qdev->port,
3767 qdev->chip_rev_id & 0x0000000f,
3768 qdev->chip_rev_id >> 4 & 0x0000000f,
3769 qdev->chip_rev_id >> 8 & 0x0000000f,
3770 qdev->chip_rev_id >> 12 & 0x0000000f);
3771 netif_info(qdev, probe, qdev->ndev,
3772 "MAC address %pM\n", ndev->dev_addr);
3775 static int ql_wol(struct ql_adapter *qdev)
3778 u32 wol = MB_WOL_DISABLE;
3780 /* The CAM is still intact after a reset, but if we
3781 * are doing WOL, then we may need to program the
3782 * routing regs. We would also need to issue the mailbox
3783 * commands to instruct the MPI what to do per the ethtool
3784 * settings.
3785 */
3787 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3788 WAKE_MCAST | WAKE_BCAST)) {
3789 netif_err(qdev, ifdown, qdev->ndev,
3790 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3795 if (qdev->wol & WAKE_MAGIC) {
3796 status = ql_mb_wol_set_magic(qdev, 1);
3798 netif_err(qdev, ifdown, qdev->ndev,
3799 "Failed to set magic packet on %s.\n",
3803 netif_info(qdev, drv, qdev->ndev,
3804 "Enabled magic packet successfully on %s.\n",
3807 wol |= MB_WOL_MAGIC_PKT;
3811 wol |= MB_WOL_MODE_ON;
3812 status = ql_mb_wol_mode(qdev, wol);
3813 netif_err(qdev, drv, qdev->ndev,
3814 "WOL %s (wol code 0x%x) on %s\n",
3815 (status == 0) ? "Successfully set" : "Failed",
3816 wol, qdev->ndev->name);
3822 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3825 /* Don't kill the reset worker thread if we
3826 * are in the process of recovery.
3827 */
3828 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3829 cancel_delayed_work_sync(&qdev->asic_reset_work);
3830 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3831 cancel_delayed_work_sync(&qdev->mpi_work);
3832 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3833 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3834 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3837 static int ql_adapter_down(struct ql_adapter *qdev)
3843 ql_cancel_all_work_sync(qdev);
3845 for (i = 0; i < qdev->rss_ring_count; i++)
3846 napi_disable(&qdev->rx_ring[i].napi);
3848 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3850 ql_disable_interrupts(qdev);
3852 ql_tx_ring_clean(qdev);
3854 /* Call netif_napi_del() from common point. */
3856 for (i = 0; i < qdev->rss_ring_count; i++)
3857 netif_napi_del(&qdev->rx_ring[i].napi);
3859 status = ql_adapter_reset(qdev);
3860 if (status)
3861 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3862 qdev->func);
3863 ql_free_rx_buffers(qdev);
3868 static int ql_adapter_up(struct ql_adapter *qdev)
3872 err = ql_adapter_initialize(qdev);
3874 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3877 set_bit(QL_ADAPTER_UP, &qdev->flags);
3878 ql_alloc_rx_buffers(qdev);
3879 /* If the port is initialized and the
3880 * link is up then turn on the carrier.
3881 */
3882 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3883 (ql_read32(qdev, STS) & qdev->port_link_up))
3884 ql_link_on(qdev);
3885 /* Restore rx mode. */
3886 clear_bit(QL_ALLMULTI, &qdev->flags);
3887 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3888 qlge_set_multicast_list(qdev->ndev);
3890 /* Restore vlan setting. */
3891 qlge_restore_vlan(qdev);
3893 ql_enable_interrupts(qdev);
3894 ql_enable_all_completion_interrupts(qdev);
3895 netif_tx_start_all_queues(qdev->ndev);
3899 ql_adapter_reset(qdev);
3903 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3905 ql_free_mem_resources(qdev);
3909 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3913 if (ql_alloc_mem_resources(qdev)) {
3914 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3915 return -ENOMEM;
3916 }
3917 status = ql_request_irq(qdev);
3921 static int qlge_close(struct net_device *ndev)
3923 struct ql_adapter *qdev = netdev_priv(ndev);
3926 /* If we hit pci_channel_io_perm_failure
3927 * failure condition, then we already
3928 * brought the adapter down.
3929 */
3930 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3931 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
3932 clear_bit(QL_EEH_FATAL, &qdev->flags);
3937 * Wait for device to recover from a reset.
3938 * (Rarely happens, but possible.)
3940 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3941 msleep(1);
3943 /* Make sure refill_work doesn't re-enable napi */
3944 for (i = 0; i < qdev->rss_ring_count; i++)
3945 cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work);
3947 ql_adapter_down(qdev);
3948 ql_release_adapter_resources(qdev);
3952 static void qlge_set_lb_size(struct ql_adapter *qdev)
3954 if (qdev->ndev->mtu <= 1500)
3955 qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE;
3956 else
3957 qdev->lbq_buf_size = LARGE_BUFFER_MAX_SIZE;
3958 qdev->lbq_buf_order = get_order(qdev->lbq_buf_size);
3961 static int ql_configure_rings(struct ql_adapter *qdev)
3964 struct rx_ring *rx_ring;
3965 struct tx_ring *tx_ring;
3966 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
3968 /* In a perfect world we have one RSS ring for each CPU
3969 * and each has it's own vector. To do that we ask for
3970 * cpu_cnt vectors. ql_enable_msix() will adjust the
3971 * vector count to what we actually get. We then
3972 * allocate an RSS ring for each.
3973 * Essentially, we are doing min(cpu_count, msix_vector_count).
3974 */
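/* e.g. on a 4-CPU box that is granted all 4 MSI-X vectors this gives
 * rss_ring_count = 4 and tx_ring_count = 4, so rx_ring_count = 8: four
 * inbound RSS completion rings plus four outbound completion rings.
 */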
3975 qdev->intr_count = cpu_cnt;
3976 ql_enable_msix(qdev);
3977 /* Adjust the RSS ring count to the actual vector count. */
3978 qdev->rss_ring_count = qdev->intr_count;
3979 qdev->tx_ring_count = cpu_cnt;
3980 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
3982 for (i = 0; i < qdev->tx_ring_count; i++) {
3983 tx_ring = &qdev->tx_ring[i];
3984 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3985 tx_ring->qdev = qdev;
3986 tx_ring->wq_id = i;
3987 tx_ring->wq_len = qdev->tx_ring_size;
3988 tx_ring->wq_size =
3989 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3991 /*
3992 * The completion queue ID for the tx rings starts
3993 * immediately after the rss rings.
3994 */
3995 tx_ring->cq_id = qdev->rss_ring_count + i;
3998 for (i = 0; i < qdev->rx_ring_count; i++) {
3999 rx_ring = &qdev->rx_ring[i];
4000 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4001 rx_ring->qdev = qdev;
4002 rx_ring->cq_id = i;
4003 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
4004 if (i < qdev->rss_ring_count) {
4005 /*
4006 * Inbound (RSS) queues.
4007 */
4008 rx_ring->cq_len = qdev->rx_ring_size;
4009 rx_ring->cq_size =
4010 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4011 rx_ring->lbq.type = QLGE_LB;
4012 rx_ring->sbq.type = QLGE_SB;
4013 INIT_DELAYED_WORK(&rx_ring->refill_work,
4014 &qlge_slow_refill);
4015 } else {
4016 /*
4017 * Outbound queue handles outbound completions only.
4018 */
4019 /* outbound cq is same size as tx_ring it services. */
4020 rx_ring->cq_len = qdev->tx_ring_size;
4022 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4028 static int qlge_open(struct net_device *ndev)
4031 struct ql_adapter *qdev = netdev_priv(ndev);
4033 err = ql_adapter_reset(qdev);
4037 qlge_set_lb_size(qdev);
4038 err = ql_configure_rings(qdev);
4042 err = ql_get_adapter_resources(qdev);
4046 err = ql_adapter_up(qdev);
4053 ql_release_adapter_resources(qdev);
4057 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4061 /* Wait for an outstanding reset to complete. */
4062 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4063 int i = 4;
4065 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4066 netif_err(qdev, ifup, qdev->ndev,
4067 "Waiting for adapter UP...\n");
4072 netif_err(qdev, ifup, qdev->ndev,
4073 "Timed out waiting for adapter UP\n");
4078 status = ql_adapter_down(qdev);
4082 qlge_set_lb_size(qdev);
4084 status = ql_adapter_up(qdev);
4090 netif_alert(qdev, ifup, qdev->ndev,
4091 "Driver up/down cycle failed, closing device.\n");
4092 set_bit(QL_ADAPTER_UP, &qdev->flags);
4093 dev_close(qdev->ndev);
4097 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4099 struct ql_adapter *qdev = netdev_priv(ndev);
4102 if (ndev->mtu == 1500 && new_mtu == 9000) {
4103 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4104 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4105 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4109 queue_delayed_work(qdev->workqueue,
4110 &qdev->mpi_port_cfg_work, 3*HZ);
4112 ndev->mtu = new_mtu;
4114 if (!netif_running(qdev->ndev)) {
4118 status = ql_change_rx_buffers(qdev);
4119 if (status)
4120 netif_err(qdev, ifup, qdev->ndev,
4121 "Changing MTU failed.\n");
4127 static struct net_device_stats *qlge_get_stats(struct net_device
4128 *ndev)
4129 {
4130 struct ql_adapter *qdev = netdev_priv(ndev);
4131 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4132 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4133 unsigned long pkts, mcast, dropped, errors, bytes;
4137 pkts = mcast = dropped = errors = bytes = 0;
4138 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4139 pkts += rx_ring->rx_packets;
4140 bytes += rx_ring->rx_bytes;
4141 dropped += rx_ring->rx_dropped;
4142 errors += rx_ring->rx_errors;
4143 mcast += rx_ring->rx_multicast;
4145 ndev->stats.rx_packets = pkts;
4146 ndev->stats.rx_bytes = bytes;
4147 ndev->stats.rx_dropped = dropped;
4148 ndev->stats.rx_errors = errors;
4149 ndev->stats.multicast = mcast;
4152 pkts = errors = bytes = 0;
4153 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4154 pkts += tx_ring->tx_packets;
4155 bytes += tx_ring->tx_bytes;
4156 errors += tx_ring->tx_errors;
4158 ndev->stats.tx_packets = pkts;
4159 ndev->stats.tx_bytes = bytes;
4160 ndev->stats.tx_errors = errors;
4161 return &ndev->stats;
4164 static void qlge_set_multicast_list(struct net_device *ndev)
4166 struct ql_adapter *qdev = netdev_priv(ndev);
4167 struct netdev_hw_addr *ha;
4168 int i, status;
4170 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4171 if (status)
4172 return;
4173 /*
4174 * Set or clear promiscuous mode if a
4175 * transition is taking place.
4176 */
4177 if (ndev->flags & IFF_PROMISC) {
4178 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4179 if (ql_set_routing_reg
4180 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4181 netif_err(qdev, hw, qdev->ndev,
4182 "Failed to set promiscuous mode.\n");
4183 } else {
4184 set_bit(QL_PROMISCUOUS, &qdev->flags);
4185 }
4186 }
4187 } else {
4188 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4189 if (ql_set_routing_reg
4190 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4191 netif_err(qdev, hw, qdev->ndev,
4192 "Failed to clear promiscuous mode.\n");
4193 } else {
4194 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4195 }
4196 }
4197 }
4199 /*
4200 * Set or clear all multicast mode if a
4201 * transition is taking place.
4202 */
4203 if ((ndev->flags & IFF_ALLMULTI) ||
4204 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4205 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4206 if (ql_set_routing_reg
4207 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4208 netif_err(qdev, hw, qdev->ndev,
4209 "Failed to set all-multi mode.\n");
4210 } else {
4211 set_bit(QL_ALLMULTI, &qdev->flags);
4212 }
4213 }
4214 } else {
4215 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4216 if (ql_set_routing_reg
4217 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4218 netif_err(qdev, hw, qdev->ndev,
4219 "Failed to clear all-multi mode.\n");
4220 } else {
4221 clear_bit(QL_ALLMULTI, &qdev->flags);
4222 }
4223 }
4224 }
4226 if (!netdev_mc_empty(ndev)) {
4227 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4228 if (status)
4229 goto exit;
4230 i = 0;
4231 netdev_for_each_mc_addr(ha, ndev) {
4232 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4233 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4234 netif_err(qdev, hw, qdev->ndev,
4235 "Failed to loadmulticast address.\n");
4236 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4237 goto exit;
4238 }
4239 i++;
4240 }
4241 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4242 if (ql_set_routing_reg
4243 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4244 netif_err(qdev, hw, qdev->ndev,
4245 "Failed to set multicast match mode.\n");
4246 } else {
4247 set_bit(QL_ALLMULTI, &qdev->flags);
4248 }
4249 }
4250 exit:
4251 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4254 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4256 struct ql_adapter *qdev = netdev_priv(ndev);
4257 struct sockaddr *addr = p;
4260 if (!is_valid_ether_addr(addr->sa_data))
4261 return -EADDRNOTAVAIL;
4262 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4263 /* Update local copy of current mac address. */
4264 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4266 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4267 if (status)
4268 return status;
4269 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4270 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4271 if (status)
4272 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4273 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4277 static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
4279 struct ql_adapter *qdev = netdev_priv(ndev);
4280 ql_queue_asic_error(qdev);
4283 static void ql_asic_reset_work(struct work_struct *work)
4285 struct ql_adapter *qdev =
4286 container_of(work, struct ql_adapter, asic_reset_work.work);
4289 status = ql_adapter_down(qdev);
4293 status = ql_adapter_up(qdev);
4297 /* Restore rx mode. */
4298 clear_bit(QL_ALLMULTI, &qdev->flags);
4299 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4300 qlge_set_multicast_list(qdev->ndev);
4305 netif_alert(qdev, ifup, qdev->ndev,
4306 "Driver up/down cycle failed, closing device\n");
4308 set_bit(QL_ADAPTER_UP, &qdev->flags);
4309 dev_close(qdev->ndev);
4313 static const struct nic_operations qla8012_nic_ops = {
4314 .get_flash = ql_get_8012_flash_params,
4315 .port_initialize = ql_8012_port_initialize,
4318 static const struct nic_operations qla8000_nic_ops = {
4319 .get_flash = ql_get_8000_flash_params,
4320 .port_initialize = ql_8000_port_initialize,
4323 /* Find the pcie function number for the other NIC
4324 * on this chip. Since both NIC functions share a
4325 * common firmware we have the lowest enabled function
4326 * do any common work. Examples would be resetting
4327 * after a fatal firmware error, or doing a firmware
4328 * coredump.
4329 */
4330 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4334 u32 nic_func1, nic_func2;
4336 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4337 &temp);
4338 if (status)
4339 return status;
4341 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4342 MPI_TEST_NIC_FUNC_MASK);
4343 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4344 MPI_TEST_NIC_FUNC_MASK);
4346 if (qdev->func == nic_func1)
4347 qdev->alt_func = nic_func2;
4348 else if (qdev->func == nic_func2)
4349 qdev->alt_func = nic_func1;
4350 else
4351 status = -EIO;
4353 return status;
4354 }
4356 static int ql_get_board_info(struct ql_adapter *qdev)
4360 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4364 status = ql_get_alt_pcie_func(qdev);
4368 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4370 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4371 qdev->port_link_up = STS_PL1;
4372 qdev->port_init = STS_PI1;
4373 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4374 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4375 } else {
4376 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4377 qdev->port_link_up = STS_PL0;
4378 qdev->port_init = STS_PI0;
4379 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4380 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4382 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4383 qdev->device_id = qdev->pdev->device;
4384 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4385 qdev->nic_ops = &qla8012_nic_ops;
4386 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4387 qdev->nic_ops = &qla8000_nic_ops;
4391 static void ql_release_all(struct pci_dev *pdev)
4393 struct net_device *ndev = pci_get_drvdata(pdev);
4394 struct ql_adapter *qdev = netdev_priv(ndev);
4396 if (qdev->workqueue) {
4397 destroy_workqueue(qdev->workqueue);
4398 qdev->workqueue = NULL;
4399 }
4401 if (qdev->reg_base)
4402 iounmap(qdev->reg_base);
4403 if (qdev->doorbell_area)
4404 iounmap(qdev->doorbell_area);
4405 vfree(qdev->mpi_coredump);
4406 pci_release_regions(pdev);
4409 static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4412 struct ql_adapter *qdev = netdev_priv(ndev);
4415 memset((void *)qdev, 0, sizeof(*qdev));
4416 err = pci_enable_device(pdev);
4418 dev_err(&pdev->dev, "PCI device enable failed.\n");
4424 pci_set_drvdata(pdev, ndev);
4426 /* Set PCIe read request size */
4427 err = pcie_set_readrq(pdev, 4096);
4429 dev_err(&pdev->dev, "Set readrq failed.\n");
4433 err = pci_request_regions(pdev, DRV_NAME);
4435 dev_err(&pdev->dev, "PCI region request failed.\n");
4439 pci_set_master(pdev);
4440 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4441 set_bit(QL_DMA64, &qdev->flags);
4442 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4444 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4446 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4450 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4454 /* Set PCIe reset type for EEH to fundamental. */
4455 pdev->needs_freset = 1;
4456 pci_save_state(pdev);
4457 qdev->reg_base =
4458 ioremap(pci_resource_start(pdev, 1),
4459 pci_resource_len(pdev, 1));
4460 if (!qdev->reg_base) {
4461 dev_err(&pdev->dev, "Register mapping failed.\n");
4466 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4467 qdev->doorbell_area =
4468 ioremap(pci_resource_start(pdev, 3),
4469 pci_resource_len(pdev, 3));
4470 if (!qdev->doorbell_area) {
4471 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4476 err = ql_get_board_info(qdev);
4478 dev_err(&pdev->dev, "Register access failed.\n");
4482 qdev->msg_enable = netif_msg_init(debug, default_msg);
4483 spin_lock_init(&qdev->stats_lock);
4485 if (qlge_mpi_coredump) {
4486 qdev->mpi_coredump =
4487 vmalloc(sizeof(struct ql_mpi_coredump));
4488 if (!qdev->mpi_coredump) {
4492 if (qlge_force_coredump)
4493 set_bit(QL_FRC_COREDUMP, &qdev->flags);
4495 /* make sure the EEPROM is good */
4496 err = qdev->nic_ops->get_flash(qdev);
4498 dev_err(&pdev->dev, "Invalid FLASH.\n");
4502 /* Keep local copy of current mac address. */
4503 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4505 /* Set up the default ring sizes. */
4506 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4507 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4509 /* Set up the coalescing parameters. */
4510 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4511 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4512 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4513 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4516 * Set up the operating parameters.
4518 qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
4520 if (!qdev->workqueue) {
4525 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4526 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4527 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4528 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4529 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4530 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4531 init_completion(&qdev->ide_completion);
4532 mutex_init(&qdev->mpi_mutex);
4535 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4536 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4537 DRV_NAME, DRV_VERSION);
4541 ql_release_all(pdev);
4543 pci_disable_device(pdev);
static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_rx_mode	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};
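/* Deferrable watchdog timer: periodically reads the status register so
 * that a dead PCI bus is noticed (and surfaced to EEH) even when the
 * interface is otherwise idle. The timer is not rearmed once the
 * channel goes offline.
 */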
static void ql_timer(struct timer_list *t)
{
	struct ql_adapter *qdev = from_timer(qdev, t, timer);
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5 * HZ));
}
static int qlge_probe(struct pci_dev *pdev,
		      const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS, netif_get_num_default_rss_queues()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG |
			    NETIF_F_IP_CSUM |
			    NETIF_F_TSO |
			    NETIF_F_TSO_ECN |
			    NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features;
	ndev->vlan_features = ndev->hw_features;
	/* vlan gets same features (except vlan filter) */
	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
				 NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX);

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/* Set up net_device structure. */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	ndev->ethtool_ops = &qlge_ethtool_ops;
	ndev->watchdog_timeo = 10 * HZ;

	/* MTU range: this driver only supports 1500 or 9000, so this only
	 * filters out values above or below, and we rely on
	 * qlge_change_mtu to make sure only 1500 or 9000 are allowed.
	 */
	ndev->min_mtu = ETH_DATA_LEN;
	ndev->max_mtu = 9000;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		free_netdev(ndev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead.
	 */
	timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE);
	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}
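/* Wrappers used by the ethtool loopback self-test so it can transmit
 * frames and drain the receive ring through the normal data path.
 */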
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}
static void qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}
/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* The timer is already stopped by the caller; cancel pending work. */
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}
/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		del_timer_sync(&qdev->timer);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		del_timer_sync(&qdev->timer);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
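/* Called when traffic may resume after a successful slot reset; brings
 * the interface back up and restarts the watchdog timer.
 */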
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	netif_device_attach(ndev);
}
static const struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};
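/* Legacy PCI power-management callbacks; compiled in only when
 * CONFIG_PM is enabled (see the matching #endif below).
 */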
#ifdef CONFIG_PM
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */
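/* On shutdown, reuse the suspend path so the adapter is quiesced (and
 * wake-on-LAN configured, where supported) before power-off.
 */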
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}
static struct pci_driver qlge_driver = {
	.name = qlge_driver_name,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = qlge_remove,
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

module_pci_driver(qlge_driver);